source
stringlengths
3
92
c
stringlengths
26
2.25M
build_tree2.h
#ifndef build_tree_h #define build_tree_h #include <algorithm> #include "logger.h" #include "thread.h" #include "types.h" #ifndef _OPENMP int omp_get_num_threads() {return 1;} int omp_get_thread_num() {return 0;} #else #include <omp.h> #endif class BuildTree { private: int maxlevel; private: //! Transform Xmin & Xmax to X (center) & R (radius) Box bounds2box(Bounds bounds) { vec3 Xmin = bounds.Xmin; // Set local Xmin vec3 Xmax = bounds.Xmax; // Set local Xmax Box box; // Bounding box for (int d=0; d<3; d++) box.X[d] = (Xmax[d] + Xmin[d]) / 2; // Calculate center of domain box.R = 0; // Initialize localRadius for (int d=0; d<3; d++) { // Loop over dimensions box.R = std::max(box.X[d] - Xmin[d], box.R); // Calculate min distance from center box.R = std::max(Xmax[d] - box.X[d], box.R); // Calculate max distance from center } // End loop over dimensions box.R *= 1.00001; // Add some leeway to radius return box; // Return box.X and box.R } //! Calculate the Morton key inline void getKey(Bodies &bodies, uint64_t * key, Bounds bounds, int level) { Box box = bounds2box(bounds); float d = 2 * box.R / (1 << level); // Cell size at current level #pragma omp parallel for for (int b=0; b<int(bodies.size()); b++) { // Loop over bodies B_iter B=bodies.begin()+b; // Body iterator int ix = (B->X[0] - bounds.Xmin[0]) / d; // Index in x dimension int iy = (B->X[1] - bounds.Xmin[1]) / d; // Index in y dimension int iz = (B->X[2] - bounds.Xmin[2]) / d; // Index in z dimension int id = 0; // Initialize Morton key for( int l=0; l!=level; ++l ) { // Loop over levels id += (ix & 1) << (3 * l); // Interleave x bit id += (iy & 1) << (3 * l + 1); // Interleave y bit id += (iz & 1) << (3 * l + 2); // Interleave z bit ix >>= 1; // Shift x index iy >>= 1; // Shift y index iz >>= 1; // Shift z index } // End loop over levels key[b] = id; // Store Morton key in array B->ICELL = id; // Store Morton key in body struct } // End loop over bodies } void radixSort(uint64_t * key, int * value, uint64_t * 
buffer, int * permutation, int size) { const int bitStride = 8; const int stride = 1 << bitStride; const int mask = stride - 1; int numThreads; int (*bucketPerThread)[stride]; uint64_t maxKey = 0; uint64_t * maxKeyPerThread; #pragma omp parallel { numThreads = omp_get_num_threads(); #pragma omp single { bucketPerThread = new int [numThreads][stride](); maxKeyPerThread = new uint64_t [numThreads]; for (int i=0; i<numThreads; i++) maxKeyPerThread[i] = 0; } #pragma omp for for (int i=0; i<size; i++) if (key[i] > maxKeyPerThread[omp_get_thread_num()]) maxKeyPerThread[omp_get_thread_num()] = key[i]; #pragma omp single for (int i=0; i<numThreads; i++) if (maxKeyPerThread[i] > maxKey) maxKey = maxKeyPerThread[i]; while (maxKey > 0) { int bucket[stride] = {0}; #pragma omp single for (int t=0; t<numThreads; t++) for (int i=0; i<stride; i++) bucketPerThread[t][i] = 0; #pragma omp for for (int i=0; i<size; i++) bucketPerThread[omp_get_thread_num()][key[i] & mask]++; #pragma omp single { for (int t=0; t<numThreads; t++) for (int i=0; i<stride; i++) bucket[i] += bucketPerThread[t][i]; for (int i=1; i<stride; i++) bucket[i] += bucket[i-1]; for (int i=size-1; i>=0; i--) permutation[i] = --bucket[key[i] & mask]; } #pragma omp for for (int i=0; i<size; i++) buffer[permutation[i]] = value[i]; #pragma omp for for (int i=0; i<size; i++) value[i] = buffer[i]; #pragma omp for for (int i=0; i<size; i++) buffer[permutation[i]] = key[i]; #pragma omp for for (int i=0; i<size; i++) key[i] = buffer[i] >> bitStride; #pragma omp single maxKey >>= bitStride; } } delete[] bucketPerThread; delete[] maxKeyPerThread; } void permute(Bodies & bodies, int * index) { const int n = bodies.size(); Bodies buffer = bodies; #pragma omp parallel for for (int b=0; b<n; b++) bodies[b] = buffer[index[b]]; } void bodies2leafs(Bodies & bodies, Cells & cells, Bounds bounds, int level) { int I = -1; C_iter C; cells.reserve(1 << (3 * level)); Box box = bounds2box(bounds); float d = 2 * box.R / (1 << level); for 
(B_iter B=bodies.begin(); B!=bodies.end(); B++) { int IC = B->ICELL; int ix = (B->X[0] - bounds.Xmin[0]) / d; int iy = (B->X[1] - bounds.Xmin[1]) / d; int iz = (B->X[2] - bounds.Xmin[2]) / d; if( IC != I ) { Cell cell; cell.NCHILD = 0; cell.NBODY = 0; cell.ICHILD = 0; cell.BODY = B; cell.ICELL = IC; cell.X[0] = d * (ix + .5) + bounds.Xmin[0]; cell.X[1] = d * (iy + .5) + bounds.Xmin[1]; cell.X[2] = d * (iz + .5) + bounds.Xmin[2]; cell.R = d * .5; cells.push_back(cell); C = cells.end()-1; I = IC; } C->NBODY++; } } void leafs2cells(Cells & cells, Bounds bounds, int level) { int begin = 0, end = cells.size(); Box box = bounds2box(bounds); float d = 2 * box.R / (1 << level); for (int l=1; l<=level; l++) { int div = (1 << (3 * l)); d *= 2; int I = -1; int p = end - 1; for (int c=begin; c!=end; c++) { B_iter B = cells[c].BODY; int IC = B->ICELL / div; int ix = (B->X[0] - bounds.Xmin[0]) / d; int iy = (B->X[1] - bounds.Xmin[1]) / d; int iz = (B->X[2] - bounds.Xmin[2]) / d; if (IC != I) { Cell cell; cell.NCHILD = 0; cell.NBODY = 0; cell.ICHILD = c; cell.BODY = cells[c].BODY; cell.ICELL = IC; cell.X[0] = d * (ix + .5) + bounds.Xmin[0]; cell.X[1] = d * (iy + .5) + bounds.Xmin[1]; cell.X[2] = d * (iz + .5) + bounds.Xmin[2]; cell.R = d * .5; cells.push_back(cell); p++; I = IC; } cells[p].NCHILD++; cells[p].NBODY += cells[c].NBODY; cells[c].IPARENT = p; } begin = end; end = cells.size(); } cells.back().IPARENT = 0; } void reverseOrder(Cells & cells, int * permutation) { const int numCells = cells.size(); int ic = numCells - 1; for (int c=0; c<numCells; c++,ic--) { permutation[c] = ic; } for (C_iter C=cells.begin(); C!=cells.end(); C++) { C->ICHILD = permutation[C->ICHILD] - C->NCHILD + 1; C->IPARENT = permutation[C->IPARENT]; } std::reverse(cells.begin(), cells.end()); } public: BuildTree(int, int) : maxlevel(0) {} Cells buildTree(Bodies & bodies, Bounds bounds) { const int numBodies = bodies.size(); const int level = 6; maxlevel = level; uint64_t * key = new uint64_t 
[numBodies]; uint64_t * buffer = new uint64_t [numBodies]; int * index = new int [numBodies]; int * permutation = new int [numBodies]; Cells cells; for (int b=0; b<int(bodies.size()); b++) { index[b] = b; } logger::startTimer("Morton key"); getKey(bodies, key, bounds, level); logger::stopTimer("Morton key"); logger::startTimer("Radix sort"); radixSort(key, index, buffer, permutation, numBodies); logger::stopTimer("Radix sort"); logger::startTimer("Permutation"); permute(bodies, index); logger::stopTimer("Permutation"); logger::startTimer("Bodies to leafs"); bodies2leafs(bodies, cells, bounds, level); logger::stopTimer("Bodies to leafs"); logger::startTimer("Leafs to cells"); leafs2cells(cells, bounds, level); logger::stopTimer("Leafs to cells"); logger::startTimer("Reverse order"); reverseOrder(cells, permutation); logger::stopTimer("Reverse order"); delete[] key; delete[] buffer; delete[] index; delete[] permutation; return cells; } //! Print tree structure statistics void printTreeData(Cells & cells) { if (logger::verbose && !cells.empty()) { // If verbose flag is true logger::printTitle("Tree stats"); // Print title std::cout << std::setw(logger::stringLength) << std::left// Set format << "Bodies" << " : " << cells.front().NBODY << std::endl// Print number of bodies << std::setw(logger::stringLength) << std::left// Set format << "Cells" << " : " << cells.size() << std::endl// Print number of cells << std::setw(logger::stringLength) << std::left// Set format << "Tree depth" << " : " << maxlevel << std::endl;// Print number of levels } // End if for verbose flag } }; #endif
GB_binop__copysign_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__copysign_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__copysign_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__copysign_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__copysign_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__copysign_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp64) // C=scalar+B GB (_bind1st__copysign_fp64) // C=scalar+B' GB (_bind1st_tran__copysign_fp64) // C=A+scalar GB (_bind2nd__copysign_fp64) // C=A'+scalar GB (_bind2nd_tran__copysign_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = copysign (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = copysign (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COPYSIGN || GxB_NO_FP64 || GxB_NO_COPYSIGN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__copysign_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__copysign_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__copysign_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double 
*restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__copysign_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__copysign_fp64) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__copysign_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__copysign_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__copysign_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__copysign_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = copysign (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__copysign_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = copysign (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = copysign (x, aij) ; \ } GrB_Info GB (_bind1st_tran__copysign_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = copysign (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__copysign_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
core_csymm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsymm.c, normal z -> c, Fri Sep 28 17:38:23 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_symm * * Performs one of the matrix-matrix operations * * \f[ C = \alpha \times A \times B + \beta \times C \f] * or * \f[ C = \alpha \times B \times A + \beta \times C \f] * * where alpha and beta are scalars, A is a symmetric matrix and B and * C are m-by-n matrices. * ******************************************************************************* * * @param[in] side * Specifies whether the symmetric matrix A appears on the * left or right in the operation as follows: * - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f] * - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f] * * @param[in] uplo * Specifies whether the upper or lower triangular part of * the symmetric matrix A is to be referenced as follows: * - PlasmaLower: Only the lower triangular part of the * symmetric matrix A is to be referenced. * - PlasmaUpper: Only the upper triangular part of the * symmetric matrix A is to be referenced. * * @param[in] m * The number of rows of the matrix C. m >= 0. * * @param[in] n * The number of columns of the matrix C. n >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] A * A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft, * and is n otherwise. Only the uplo triangular part is referenced. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,ka). * * @param[in] B * B is an ldb-by-n matrix, where the leading m-by-n part of * the array B must contain the matrix B. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m). 
* * @param[in] beta * The scalar beta. * * @param[in,out] C * C is an ldc-by-n matrix. * On exit, the array is overwritten by the m-by-n updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1,m). * ******************************************************************************/ __attribute__((weak)) void plasma_core_csymm(plasma_enum_t side, plasma_enum_t uplo, int m, int n, plasma_complex32_t alpha, const plasma_complex32_t *A, int lda, const plasma_complex32_t *B, int ldb, plasma_complex32_t beta, plasma_complex32_t *C, int ldc) { cblas_csymm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, m, n, CBLAS_SADDR(alpha), A, lda, B, ldb, CBLAS_SADDR(beta), C, ldc); } /******************************************************************************/ void plasma_core_omp_csymm( plasma_enum_t side, plasma_enum_t uplo, int m, int n, plasma_complex32_t alpha, const plasma_complex32_t *A, int lda, const plasma_complex32_t *B, int ldb, plasma_complex32_t beta, plasma_complex32_t *C, int ldc, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; if (side == PlasmaLeft) ak = m; else ak = n; #pragma omp task depend(in:A[0:lda*ak]) \ depend(in:B[0:ldb*n]) \ depend(inout:C[0:ldc*n]) { if (sequence->status == PlasmaSuccess) plasma_core_csymm(side, uplo, m, n, alpha, A, lda, B, ldb, beta, C, ldc); } }
DRB091-threadprivate2-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A file-scope variable used within a function called by a parallel region. 
Use threadprivate to avoid data races. This is the case for a variable referenced within a construct. */ #include <stdio.h> #include <assert.h> int sum0 = 0, sum1 = 0; int main() { int len = 1000; int i, sum = 0; int _ret_val_0; #pragma cetus private(i) #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<len; i ++ ) { } sum0+=499500; sum=(sum+sum0); /* reference calculation */ #pragma cetus private(i) #pragma loop name main#1 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<len; i ++ ) { } sum1+=(((-1*len)+(len*len))/2); printf("sum=%d; sum1=%d\n", sum, sum1); (((void)sizeof ((sum==sum1) ? 1 : 0)), ({ if (sum==sum1) { ; } else { __assert_fail("sum==sum1", "DRB091-threadprivate2-orig-no.c", 74, __PRETTY_FUNCTION__); } })); _ret_val_0=0; return _ret_val_0; }
dacemath.c
/****************************************************************************** * * * DIFFERENTIAL ALGEBRA CORE ENGINE * * * ******************************************************************************* * * * Copyright 2016 Politecnico di Milano (2014 Dinamica Srl) * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * *******************************************************************************/ /* * dacemath.c * * Created on: November 18, 2016 * Author: Politecnico di Milano */ /** \addtogroup DACE Core * @{ */ // MS C library needs this to trigger it to define math constants #define _USE_MATH_DEFINES #include <math.h> #include <stdlib.h> #include "dace/config.h" #include "dace/dacebase.h" #include "dace/daceaux.h" #include "dacecontrib.h" // define various math constants in case they have not been defined by math.h // these are non-standard C, but most C libraries have them #ifndef M_PI #define M_PI (3.14159265358979323846) #endif #ifndef M_PI_2 #define M_PI_2 (1.57079632679489661923) #endif /******************************************************************************** * Basic DACE arithmetic operations *********************************************************************************/ /*! Perform addition of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. 
inc can be the same as ina or inb. */ void daceAdd(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc)) { daceWeightedSum(ina, 1.0, inb, 1.0, inc); } else { DACEDA idaadd; daceAllocateDA(&idaadd, 0); daceWeightedSum(ina, 1.0, inb, 1.0, &idaadd); daceCopy(&idaadd, inc); daceFreeDA(&idaadd); } } /*! Perform subtraction of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceSubtract(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { if(!daceIsSameObject(ina, inc) && !daceIsSameObject(inb, inc)) { daceWeightedSum(ina, 1.0, inb, -1.0, inc); } else { DACEDA idasub; daceAllocateDA(&idasub, 0); daceWeightedSum(ina, 1.0, inb, -1.0, &idasub); daceCopy(&idasub, inc); daceFreeDA(&idasub); } } /*! Perform multiplication of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. 
 */
void daceMultiply(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    // These should use thread local storage (TLS) for multithread safe implementations
    // see https://en.wikipedia.org/wiki/Thread-local_storage
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    // statically sized per-thread scratch buffers:
    // cc accumulates result coefficients indexed by monomial code,
    // emb holds the monomials of one operand bucketed by order,
    // ipbeg/ipend delimit the bucket of each order inside emb
    static DACE_THREAD_LOCAL double cc[DACE_STATIC_NMMAX] = {0};
    static DACE_THREAD_LOCAL extended_monomial emb[DACE_STATIC_NMMAX];
    static DACE_THREAD_LOCAL extended_monomial *ipbeg[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL extended_monomial *ipend[DACE_STATIC_NOMAX+1];
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    // (re-partition emb whenever the active order/variable setup changed)
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        // ipbeg[i] = first emb slot for monomials of order i: there are
        // daceCountMonomials(i-1, nvmax) monomials of order strictly below i
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#else
    // dynamically allocated per-thread scratch buffers (same layout as above)
    static DACE_THREAD_LOCAL double *cc = NULL;
    static DACE_THREAD_LOCAL extended_monomial *emb = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipbeg = NULL;
    static DACE_THREAD_LOCAL extended_monomial **ipend = NULL;
    static DACE_THREAD_LOCAL unsigned int nomax = 0;
    static DACE_THREAD_LOCAL unsigned int nvmax = 0;

    // make sure static memory is correctly allocated
    // (reallocate whenever the active order/variable setup changed)
    if(UNLIKELY(nomax != DACECom.nomax || nvmax != DACECom.nvmax))
    {
        nomax = DACECom.nomax;
        nvmax = DACECom.nvmax;
        dacefree(cc);
        dacefree(emb);
        dacefree(ipbeg);
        dacefree(ipend);
        cc = (double*) dacecalloc(DACECom.nmmax, sizeof(double));
        emb = (extended_monomial*) dacecalloc(DACECom.nmmax, sizeof(extended_monomial));
        ipbeg = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        ipend = (extended_monomial**) dacecalloc(DACECom.nomax+1, sizeof(extended_monomial*));
        // ipbeg[i] = first emb slot for monomials of order i
        ipbeg[0] = &emb[0];
        for(unsigned int i = 1; i <= DACECom.nomax; i++)
            ipbeg[i] = emb + daceCountMonomials(i - 1, DACECom.nvmax);
    }
#endif

    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;
    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    // sort so that ina is the short DA vector
    if(illa>illb)
    {
        unsigned int t1;
        t1 = illb; illb = illa; illa = t1;
        t1 = ilmb; ilmb = ilma; ilma = t1;
        monomial* t2;
        t2 = ipoa; ipoa = ipob; ipob = t2;
    }

    // reset the per-order buckets to empty
    for(unsigned int i = 0; i <= DACECom_t.nocut; i++)
        ipend[i] = ipbeg[i];

    // sort vector b by order, dropping monomials above the truncation order
    for(monomial *ib = ipob; ib < ipob+illb; ib++)
    {
        const unsigned int noib = DACECom.ieo[ib->ii];
        if(noib > DACECom_t.nocut) continue;
        ipend[noib]->i1 = DACECom.ie1[ib->ii];
        ipend[noib]->i2 = DACECom.ie2[ib->ii];
        ipend[noib]->cc = ib->cc;
        ipend[noib]++;
    }

    // perform actual multiplication: each monomial of a is paired only with
    // those buckets of b whose order keeps the product within the truncation order
    for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
    {
        const unsigned int i1ia = DACECom.ie1[ia->ii];
        const unsigned int i2ia = DACECom.ie2[ia->ii];
        const double ccia = ia->cc;
        // Note: all of these inner loops can safely be run in parallel
        //#pragma omp parallel for
        for(int noib = DACECom_t.nocut-DACECom.ieo[ia->ii]; noib >= 0; noib--)
        {
            for(extended_monomial *ib = ipbeg[noib]; ib < ipend[noib]; ib++)
            {
                // combine the split exponent codes to get the product's index
                const unsigned int ic = DACECom.ia1[i1ia+ib->i1] + DACECom.ia2[i2ia+ib->i2];
                cc[ic] += ccia*ib->cc;
            }
        }
    }

    // collect the accumulated coefficients into inc
    // (cc is presumably reset for reuse by dacePack -- confirm in daceaux)
    dacePack(cc, inc);
}

/*! Multiply two DA vectors component-wise, i.e. each monomial of ina with
   the corresponding monomial of inb
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the first DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
\sa daceEvalMonomials */ void daceMultiplyMonomials(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { monomial *ipoa; unsigned int ilma, illa; monomial *ipob; unsigned int ilmb, illb; monomial *ipoc; unsigned int ilmc, illc; daceVariableInformation(ina, &ipoa, &ilma, &illa); daceVariableInformation(inb, &ipob, &ilmb, &illb); daceVariableInformation(inc, &ipoc, &ilmc, &illc); monomial *ib = ipob, *ic = ipoc; monomial *const ibmax = ipob + ilmb, *const icmax = ipoc + ilmc; for (monomial *i = ipoa; i < ipoa + illa; i++) { while (ib->ii < i->ii && ib < ibmax) ib++; if (ib == ibmax) break; if (ib->ii == i->ii) { if (ic >= icmax) { daceSetError(__func__, DACE_ERROR, 21); break; } ic->cc = i->cc*ib->cc; ic->ii = i->ii; ic++; } } } /*! Perform division of two DA objects. \param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the first DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina or inb. */ void daceDivide(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { DACEDA idadiv; daceAllocateDA(&idadiv, 0); daceMultiplicativeInverse(inb, &idadiv); daceMultiply(ina, &idadiv, inc); daceFreeDA(&idadiv); } /*! Square a DA object. \param[in] ina Pointer to the DA object to square \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceSquare(const DACEDA *ina, DACEDA *inb) { daceMultiply(ina, ina, inb); } /*! Add constant to a DA object. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to add \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inb can be the same as ina. */ void daceAddDouble(const DACEDA *ina, const double ckon, DACEDA *inb) { if(!daceIsSameObject(ina, inb)) daceCopy(ina, inb); daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon); } /*! 
   Subtract DA object from constant.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract from
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceDoubleSubtract(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    // inb = ckon - ina = (-1)*ina + ckon
    daceMultiplyDouble(ina, -1.0, inb);
    daceSetCoefficient0(inb, 0, daceGetConstant(inb)+ckon);
}

/*! Subtract constant from a DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to subtract
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceSubtractDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    // inb = ina - ckon = ina + (-ckon)
    daceAddDouble(ina, -ckon, inb);
}

/*! Multiply constant and DA object.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] ckon Constant value to multiply by
   \param[out] inb Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inb can be the same as ina.
 */
void daceMultiplyDouble(const DACEDA *ina, const double ckon, DACEDA *inb)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);

    monomial *ib = ipob;

    if(illa <= ilmb)
    {
        // output is guaranteed large enough: no bounds checks needed
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            // skip monomials above the current truncation order
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            // drop coefficients below the truncation epsilon
            if(fabs(c) < DACECom_t.eps) continue;
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }
    else
    {
        // output may be too small: same loop with an overflow check
        monomial *const ibmax = ipob+ilmb;
        for(monomial *ia = ipoa; ia < ipoa+illa; ia++)
        {
            if(DACECom.ieo[ia->ii] > DACECom_t.nocut) continue;
            const double c = ia->cc*ckon;
            if(fabs(c) < DACECom_t.eps) continue;
            if(ib >= ibmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            ib->cc = c;
            ib->ii = ia->ii;
            ib++;
        }
    }

    // record how many monomials were written
    daceSetLength(inb, ib-ipob);
}

/*!
Divide DA object by a constant. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to divide by \param[out] inb Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inb can be the same as ina. */ void daceDivideDouble(const DACEDA *ina, const double ckon, DACEDA *inb) { if(ckon == 0.0) { daceSetError(__func__, DACE_ERROR, 41); daceCreateConstant(inb, 0.0); return; } daceMultiplyDouble(ina, 1.0/ckon, inb); } /*! Divide constant by DA object. \param[in] ina Pointer to the first DA object to operate on \param[in] ckon Constant value to divide \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceDoubleDivide(const DACEDA *ina, const double ckon, DACEDA *inc) { daceMultiplicativeInverse(ina, inc); daceMultiplyDouble(inc, ckon, inc); } /*! Divide a DA vector by a single variable to some power, if possible. \param[in] ina Pointer to the DA object to operate on \param[in] var Number of the independent variable by which to divide \param[in] p Power of independent variable \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceDivideByVariable(const DACEDA *ina, const unsigned int var, const unsigned int p, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // validate the independent variable number
    if(var < 1 || var > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // treat a few special cases
    if(p == 0)
    {
        // dividing by 1
        daceCopy(ina, inc);
        return;
    }
    else if(illa == 0)
    {
        // dividing 0 by anything
        daceCreateConstant(inc, 0.0);
        return;
    }
    else if(p > DACECom.nomax)
    {
        // dividing non-zero DA by too high a power
        daceSetError(__func__, DACE_ERROR, 42);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit belonging
    // to variable var within the packed exponent code
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = var-1;
    if(var > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(var > DACECom.nv1)
    {
        // variable lives in the second exponent code (ie2/ia2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;     // exponent of var in this monomial
            if(ipow < p)
            {
                // monomial is not divisible by var^p
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // lower the exponent of var by p, coefficient is unchanged
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-p*idiv];
            ic->cc = i->cc;
            ic++;
        }
    }
    else
    {
        // variable lives in the first exponent code (ie1/ia1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;     // exponent of var in this monomial
            if(ipow < p)
            {
                // monomial is not divisible by var^p
                daceSetError(__func__, DACE_ERROR, 42);
                daceCreateConstant(inc, 0.0);
                return;
            }
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // lower the exponent of var by p, coefficient is unchanged
            ic->ii = DACECom.ia1[ic1-p*idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc;
            ic++;
        }
    }

    // record how many monomials were written
    daceSetLength(inc, ic-ipoc);
}

/*! Derivative of DA object with respect to a given independent variable.
   \param[in] idif Number of the independent variable with respect to which the derivative is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceDifferentiate(const unsigned int idif, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // validate the independent variable number
    if(idif < 1 || idif > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit belonging
    // to variable idif within the packed exponent code
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = idif-1;
    if(idif > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(idif > DACECom.nv1)
    {
        // variable lives in the second exponent code (ie2/ia2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;     // exponent of idif in this monomial
            // monomials constant in idif vanish; differentiation lowers the
            // order by one, so orders up to nocut+1 still contribute
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // lower the exponent of idif by one, scale by the old exponent
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2-idiv];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }
    else
    {
        // variable lives in the first exponent code (ie1/ia1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;     // exponent of idif in this monomial
            if(ipow == 0 || DACECom.ieo[i->ii] > DACECom_t.nocut+1) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // lower the exponent of idif by one, scale by the old exponent
            ic->ii = DACECom.ia1[ic1-idiv] + DACECom.ia2[ic2];
            ic->cc = i->cc*ipow;
            ic++;
        }
    }

    // record how many monomials were written
    daceSetLength(inc, ic-ipoc);
}

/*! Integral of DA object with respect to a given independent variable.
   \param[in] iint Number of the independent variable with respect to which the integral is taken
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceIntegrate(const unsigned int iint, const DACEDA *ina, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // validate the independent variable number
    if(iint < 1 || iint > DACECom.nvmax)
    {
        daceSetError(__func__, DACE_ERROR, 24);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // exponents are packed base (nomax+1); idiv selects the digit belonging
    // to variable iint within the packed exponent code
    const unsigned int ibase = DACECom.nomax+1;
    unsigned int j = iint-1;
    if(iint > DACECom.nv1) j = j-DACECom.nv1;
    const unsigned int idiv = npown(ibase, j);

    monomial *ic = ipoc;
    monomial *const icmax = ipoc+ilmc;

    if(iint > DACECom.nv1)
    {
        // variable lives in the second exponent code (ie2/ia2)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            // integration raises the order by one, so only orders below
            // the truncation order can contribute to the result
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic2/idiv)%ibase;     // exponent of iint in this monomial
            const double ccc = i->cc/(ipow+1);              // divide by the new exponent
            // drop coefficients below the truncation epsilon
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // raise the exponent of iint by one
            ic->ii = DACECom.ia1[ic1] + DACECom.ia2[ic2+idiv];
            ic->cc = ccc;
            ic = ic+1;
        }
    }
    else
    {
        // variable lives in the first exponent code (ie1/ia1)
        for(monomial *i = ipoa; i < ipoa+illa; i++)
        {
            if(DACECom.ieo[i->ii] >= DACECom_t.nocut) continue;
            const unsigned int ic1 = DACECom.ie1[i->ii];
            const unsigned int ic2 = DACECom.ie2[i->ii];
            const unsigned int ipow = (ic1/idiv)%ibase;     // exponent of iint in this monomial
            const double ccc = i->cc/(ipow+1);              // divide by the new exponent
            if(fabs(ccc) < DACECom_t.eps) continue;
            if(ic >= icmax)
            {
                daceSetError(__func__, DACE_ERROR, 21);
                break;
            }
            // raise the exponent of iint by one
            ic->ii = DACECom.ia1[ic1+idiv] + DACECom.ia2[ic2];
            ic->cc = ccc;
            ic = ic+1;
        }
    }

    // record how many monomials were written
    daceSetLength(inc, ic-ipoc);
}

/********************************************************************************
*     DACE intrinsic function routines
*********************************************************************************/ /*! Truncate the constant part of a DA object to an integer. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceTruncate(const DACEDA *ina, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, rint(daceGetConstant(inc))); } /*! Round the constant part of a DA object to an integer. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceRound(const DACEDA *ina, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, round(daceGetConstant(inc))); } /*! Modulo the constant part of a DA object by p. \param[in] ina Pointer to the DA object to operate on \param[in] p Value with respect to which to compute the modulo \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceModulo(const DACEDA *ina, const double p, DACEDA *inc) { daceCopy(ina, inc); daceSetCoefficient0(inc, 0, fmod(daceGetConstant(inc),p)); } /*! Raise a DA object to the p-th power. \param[in] ina Pointer to the DA object to operate on \param[in] p Power to which to raise the DA object \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void dacePowerDouble(const DACEDA *ina, const double p, DACEDA *inc)
{
    // check simple cases
    if(p == 0.0)
    {
        // anything to the power 0 is 1
        daceCreateConstant(inc, 1.0);
        return;
    }
    else if(p == (int)p)
    {
        // integer powers are delegated (also handles negative constant parts)
        // NOTE(review): (int)p is undefined behavior for |p| beyond INT_MAX;
        // assumed not to occur in practice -- verify callers
        dacePower(ina, (int)p, inc);
        return;
    }

    // non-integer powers require a strictly positive constant part
    const double a0 = daceGetConstant(ina);
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 43);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // binomial series coefficients: xf[i] = pow(a0,p) * p*(p-1)*...*(p-i+1)/i!
    xf[0] = pow(a0, p);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = xf[i-1]/i*(p-(i-1));

    daceDivideDouble(ina, a0, inc);     // more accurate than including a0 in series (uses non-linear part in EvaluateSeries)
    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Raise a DA object to the p-th integer power.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] np Power to which to raise the DA object
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/ void dacePower(const DACEDA *ina, const int np, DACEDA *inc) { DACEDA itemp; // handle some common simple cases directly switch(np) { case 0: daceCreateConstant(inc, 1.0); return; case 1: daceCopy(ina, inc); return; case -1: daceMultiplicativeInverse(ina, inc); return; } // handle all other cases, again with common special cases hard coded switch(abs(np)) { case 2: daceSquare(ina, inc); break; case 3: daceAllocateDA(&itemp, 0); daceSquare(ina, &itemp); daceMultiply(ina, &itemp, inc); daceFreeDA(&itemp); break; case 4: daceAllocateDA(&itemp, 0); daceSquare(ina, &itemp); daceSquare(&itemp, inc); daceFreeDA(&itemp); break; default: daceAllocateDA(&itemp, 0); daceCopy(ina, &itemp); daceCreateConstant(inc, 1.0); unsigned int inp = abs(np); while(inp) { if(inp & 1u) daceMultiply(inc, &itemp, inc); inp >>= 1; if(inp) daceSquare(&itemp, &itemp); } daceFreeDA(&itemp); } if(np < 0) daceMultiplicativeInverse(inc, inc); } /*! Take the np-th root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[in] np Root to take of the DA object \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceRoot(const DACEDA *ina, const int np, DACEDA *inc)
{
    // the 0th root is undefined
    if(np == 0)
    {
        daceSetError(__func__, DACE_ERROR, 44);
        daceCreateConstant(inc, 0.0);
        return;
    }

    const double a0 = daceGetConstant(ina);
    const unsigned int iodd = abs(np) & 1u;     // 1 if |np| is odd

    // even roots require a positive constant part, odd roots a non-zero one
    if((iodd == 0) && (a0 <= 0.0))
    {
        daceSetError(__func__, DACE_ERROR, 45);
        daceCreateConstant(inc, 0.0);
        return;
    }
    else if((iodd == 1) && (a0 == 0.0))
    {
        daceSetError(__func__, DACE_ERROR, 46);
        daceCreateConstant(inc, 0.0);
        return;
    }

    double cr = 1.0/np;     // running binomial exponent: 1/np, 1/np-1, 1/np-2, ...

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // binomial series for (1+x)^(1/np), scaled by |a0|^(1/np) carrying the
    // sign of a0 (a negative a0 is only reachable for odd roots)
    xf[0] = copysign(pow(fabs(a0), cr), a0);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = xf[i-1]/i*cr;
        cr--;
    }

    daceDivideDouble(ina, a0, inc);     // more accurate than including a0 in series (uses non-linear part in EvaluateSeries)
    daceEvaluateSeries(inc, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the multiplicative inverse of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceMultiplicativeInverse(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // cannot invert a DA with zero constant part
    if(a0 == 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 41);
        daceCreateConstant(inc, 0.0);
        return;
    }

    if(DACECom_t.nocut < 5)
    {
        // lower orders: compute series directly
        daceMultiplicativeInverse0(ina, inc, a0);
    }
    else
    {
        // higher orders: use iteration
        // Newton iteration x <- x*(2 - a*x) doubles the number of correct
        // orders per step, starting from a 2nd order series seed. The global
        // truncation order is lowered per step to avoid wasted work; the
        // final iteration sets it back to the full order nocut.
        const unsigned int nocut = DACECom_t.nocut;
        DACECom_t.nocut = 2;
        daceMultiplicativeInverse0(ina, inc, a0);
        DACEDA temp;
        daceAllocateDA(&temp, 0);
        for(unsigned int ord = 3; ord <= nocut; ord *= 2)
        {
            DACECom_t.nocut = umin(nocut, 2*ord-1);
            daceMultiply(ina, inc, &temp);
            daceDoubleSubtract(&temp, 2.0, &temp);
            daceMultiply(inc, &temp, inc);
        }
        daceFreeDA(&temp);
    }
}

/*!
Compute the multiplicative inverse of a DA object using series expansion. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \param[in] a0 Constant part of ina \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceMultiplicativeInverse0(const DACEDA *ina, DACEDA *inc, const double a0) { daceDivideDouble(ina, a0, inc); #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double *xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif xf[0] = 1.0/a0; for(unsigned int i = 1; i < DACECom_t.nocut+1; i++) xf[i] = -xf[i-1]; daceEvaluateSeries(inc, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the square root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceSquareRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, 2, inc); } /*! Compute the inverse square root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceInverseSquareRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, -2, inc); } /*! Compute the cubic root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceCubicRoot(const DACEDA *ina, DACEDA *inc) { daceRoot(ina, 3, inc); } /*! Compute the inverse cubic root of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceInverseCubicRoot(const DACEDA *ina, DACEDA *inc)
{
    // 1/cbrt(x) = x^(-1/3)
    daceRoot(ina, -3, inc);
}

/*! Compute the hypothenuse of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] inb Pointer to the second DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina or inb.
 */
void daceHypotenuse(const DACEDA *ina, const DACEDA *inb, DACEDA *inc)
{
    // sqrt(ina^2 + inb^2)
    DACEDA itemp1, itemp2;
    daceAllocateDA(&itemp1, 0);
    daceAllocateDA(&itemp2, 0);
    daceSquare(ina, &itemp1);
    daceSquare(inb, &itemp2);
    daceAdd(&itemp1, &itemp2, inc);
    daceRoot(inc, 2, inc);
    daceFreeDA(&itemp2);
    daceFreeDA(&itemp1);
}

/*! Compute the exponential of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceExponential(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // Taylor coefficients of exp around the constant part: xf[i] = exp(a0)/i!
    xf[0] = exp(daceGetConstant(ina));
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        xf[i] = xf[i-1]/i;
    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the natural logarithm of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
*/ void daceLogarithm(const DACEDA *ina, DACEDA *inc) { const double a0 = daceGetConstant(ina); if(a0 <= 0) { daceSetError(__func__, DACE_ERROR, 47); daceCreateConstant(inc, 0.0); return; } #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif daceDivideDouble(ina, a0, inc); xf[0] = log(a0); xf[1] = 1.0; for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = -xf[i-1]/i*(i-1); } daceEvaluateSeries(inc, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the logarithm with respect to base b of a DA object. \param[in] ina Pointer to the DA object to operate on \param[in] b Base of the logarithm to use \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceLogarithmBase(const DACEDA *ina, const double b, DACEDA *inc) { if(b <= 0) { daceSetError(__func__, DACE_ERROR, 48); daceCreateConstant(inc, 0.0); return; } daceLogarithm(ina, inc); daceMultiplyDouble(inc, 1.0/log(b), inc); } /*! Compute the decadic logarithm of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceLogarithm10(const DACEDA *ina, DACEDA *inc) { daceLogarithmBase(ina, 10.0, inc); } /*! Compute the binary logarithm of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceLogarithm2(const DACEDA *ina, DACEDA *inc) { daceLogarithmBase(ina, 2.0, inc); } /*! Compute the sine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. 
inc can be the same as ina. */ void daceSine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = sin(a0); xf[1] = cos(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = -xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the cosine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceCosine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = cos(a0); xf[1] = -sin(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = -xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the tangent of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceTangent(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; if(cos(daceGetConstant(ina)) == 0.0) { daceSetError(__func__, DACE_ERROR, 49); daceCreateConstant(inc, 0.0); return; } daceAllocateDA(&itemp, 0); daceSine(ina, &itemp); daceCosine(ina, inc); daceDivide(&itemp, inc, inc); daceFreeDA(&itemp); } /*! Compute the arcsine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceArcSine(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // asin is only expanded for |constant part| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // asin(x) = atan(x/sqrt(1-x^2))
    daceAllocateDA(&itemp, 0);
    daceSquare(ina, &itemp);
    daceDoubleSubtract(&itemp, 1.0, &itemp);
    daceSquareRoot(&itemp, &itemp);
    daceDivide(ina, &itemp, inc);
    daceArcTangent(inc, inc);
    daceFreeDA(&itemp);
}

/*! Compute the arccosine of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceArcCosine(const DACEDA *ina, DACEDA *inc)
{
    // acos is only expanded for |constant part| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }
    // acos(x) = pi/2 - asin(x)
    daceArcSine(ina, inc);
    daceDoubleSubtract(inc, M_PI_2, inc);
}

/*! Compute the arctangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceArcTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA iarg;

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1] = {0};
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // shift the expansion point to zero via the addition theorem:
    // atan(x) = atan(a0) + atan((x-a0)/(1+a0*x)),
    // then iarg = (ina-a0)/(1+a0*ina) has zero constant part
    const double a0 = daceGetConstant(ina);
    daceAllocateDA(&iarg, 0);
    daceMultiplyDouble(ina, a0, &iarg);
    daceAddDouble(&iarg, 1.0, &iarg);
    daceSubtractDouble(ina, a0, inc);
    daceDivide(inc, &iarg, &iarg);

    // atan series around 0: odd powers only, alternating signs
    // (even entries stay zero from the initialization above)
    double s = 1.0;
    xf[0] = atan(a0);
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i+=2)
    {
        xf[i] = s/i;
        s = -s;
    }
    daceEvaluateSeries(&iarg, xf, inc);
    daceFreeDA(&iarg);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Arctangent of ina/inb with proper sign in [-pi, pi].
   This function follows the C standard atan2(y,x) function syntax.
\param[in] ina Pointer to the first DA object to operate on \param[in] inb Pointer to the second DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceArcTangent2(const DACEDA *ina, const DACEDA *inb, DACEDA *inc) { const double cx = daceGetConstant(inb); const double cy = daceGetConstant(ina); if(cx == 0.0 && cy == 0.0) { daceCreateConstant(inc, 0.0); } else { if(fabs(cy) > fabs(cx)) { daceDivide(inb, ina, inc); daceArcTangent(inc, inc); if(cy < 0.0) { daceDoubleSubtract(inc, -M_PI_2, inc); } else { daceDoubleSubtract(inc, M_PI_2, inc); } } else { daceDivide(ina, inb, inc); daceArcTangent(inc, inc); if(cx < 0.0) { if(cy > 0.0) { daceAddDouble(inc, M_PI, inc); } else { daceAddDouble(inc, -M_PI, inc); } } } } } /*! Compute the hyperbolic sine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicSine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = sinh(a0); xf[1] = cosh(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the hyperbolic cosine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
*/ void daceHyperbolicCosine(const DACEDA *ina, DACEDA *inc) { #if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC double xf[DACE_STATIC_NOMAX+1]; #else double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double)); #endif const double a0 = daceGetConstant(ina); xf[0] = cosh(a0); xf[1] = sinh(a0); for(unsigned int i = 2; i < DACECom_t.nocut+1; i++) { xf[i] = xf[i-2]/(i*(i-1)); } daceEvaluateSeries(ina, xf, inc); #if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC dacefree(xf); #endif } /*! Compute the hyperbolic tangent of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicTangent(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceHyperbolicSine(ina, &itemp); daceHyperbolicCosine(ina, inc); daceDivide(&itemp, inc, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arcsince of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. */ void daceHyperbolicArcSine(const DACEDA *ina, DACEDA *inc) { DACEDA itemp; daceAllocateDA(&itemp, 0); daceSquare(ina, inc); daceAddDouble(inc, 1.0, &itemp); daceSquareRoot(&itemp, inc); daceAdd(ina, inc, &itemp); daceLogarithm(&itemp, inc); daceFreeDA(&itemp); } /*! Compute the hyperbolic arccosine of a DA object. \param[in] ina Pointer to the DA object to operate on \param[out] inc Pointer to the DA object to store the result in \note This routine is aliasing safe, i.e. inc can be the same as ina. 
 */
void daceHyperbolicArcCosine(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // acosh requires a constant part strictly greater than 1: at exactly 1 the
    // derivative diverges, so the DA expansion does not exist there either
    if(daceGetConstant(ina) <= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // acosh(x) = log(x + sqrt(x^2 - 1))
    daceAllocateDA(&itemp, 0);
    daceSquare(ina, inc);
    daceSubtractDouble(inc, 1.0, &itemp);
    daceSquareRoot(&itemp, inc);
    daceAdd(ina, inc, &itemp);
    daceLogarithm(&itemp, inc);
    daceFreeDA(&itemp);
}

/*! Compute the hyperbolic arctangent of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceHyperbolicArcTangent(const DACEDA *ina, DACEDA *inc)
{
    DACEDA itemp;

    // atanh is only defined for |constant part| < 1
    if(fabs(daceGetConstant(ina)) >= 1.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // atanh(x) = 0.5*log((1 + x)/(1 - x))
    daceAllocateDA(&itemp, 0);
    daceAddDouble(ina, 1.0, &itemp);
    daceDoubleSubtract(ina, 1.0, inc);
    daceDivide(&itemp, inc, inc);
    daceLogarithm(inc, &itemp);
    daceMultiplyDouble(&itemp, 0.5, inc);
    daceFreeDA(&itemp);
}

/*! Compute the error function of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // erf'(x) = 2/sqrt(pi)*exp(-x^2); higher derivatives are this Gaussian
    // times Hermite polynomials, generated below by the standard recursion
    double factor = 2.0*exp(-a0*a0)/sqrt(M_PI);
    xf[0] = erf(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        factor /= -((double)i);     // accumulates (-1)^(i-1)/i! into the prefactor
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;     // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the complementary error function of a DA object.
   \param[in] ina Pointer to the DA object to operate on
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceComplementaryErrorFunction(const DACEDA *ina, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif
    const double a0 = daceGetConstant(ina);

    // erfc = 1 - erf, so all non-constant coefficients are simply negated
    // relative to daceErrorFunction (note the negative initial factor)
    double factor = -2.0*exp(-a0*a0)/sqrt(M_PI);
    xf[0] = erfc(a0);
    xf[1] = factor;
    double Hi2 = 1.0;       // Hermite polynomial H_{i-2} = H_0
    double Hi1 = 2.0*a0;    // Hermite polynomial H_{i-1} = H_1
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        factor /= -((double)i);
        xf[i] = factor*Hi1;
        const double temp = 2.0*a0*Hi1 - 2.0*(i-1)*Hi2;     // recursion relation: H_i = 2*x*H_{i-1} - 2*(i-1)*H_{i-2}
        Hi2 = Hi1;
        Hi1 = temp;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/// @cond
// Wrappers for contributed netlib Bessel functions (not for public use)

/*! Compute value of Bessel functions J_n, Y_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
      -1: Bessel J function
       1: Bessel Y function
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too large
    to calculate the result or another error occured, or +1 if some of the results
    are of reduced accuracy.
 */
int BesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // netlib routines compute orders 0..nb-1, so nb must cover max(|n0|,|n1|)
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;      // static buffer too small for requested order
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    if(type < 0)
        rjbesl_(&xx, &alpha, &nb, b, &ncalc);
    else
        rybesl_(&xx, &alpha, &nb, b, &ncalc);

    // discombobulate results
    if(ncalc >= 0)
    {
        // ncalc == nb means all values fully accurate (0), otherwise reduced accuracy (+1)
        ncalc = (ncalc == nb ? 0 : 1);
        // s = (-1)^|i| starting at i = n0, flipped each step while i < 0
        double s = (n0%2 == 0 ? 1.0 : -1.0);
        for(int i = n0; i <= n1; i++)
        {
            if(i >= 0)
                *(bz++) = b[i];
            else
            {
                *(bz++) = s*b[-i];      // for integer orders considered here, (-1)^n J_n = J_{-n}, and (-1)^n Y_n = Y_{-n}
                s *= -1.0;
            }
        }
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}

/*! Compute value of modified Bessel functions I_n, K_n for n in [n0, n1].
   \param[in] x function argument (non-negative)
   \param[in] n0 Lowest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] n1 Highest order of the Bessel functions to calculate (n0 <= n1)
   \param[in] type Type of function to evaluate:
      -2: Bessel I function, scaled (i.e. exp(-x)*I_n(x))
      -1: Bessel I function
       1: Bessel K function
       2: Bessel K function, scaled (i.e.
 exp(x)*K_n(x))
   \param[out] bz Array of size n1-n0+1 containing the values of B_{n0}, B_{n0+1}, ..., B_{n1}
   \return Returns 0 if all values are calculated accurately, -1 if x is too large
    to calculate the result or another error occured, or +1 if some of the results
    are of reduced accuracy.
 */
int ModifiedBesselWrapper(const double x, const int n0, const int n1, const int type, double *bz)
{
    // netlib routines compute orders 0..nb-1; ize selects unscaled (1) or scaled (2)
    long int nb = (abs(n0) > abs(n1) ? abs(n0) : abs(n1))+1, ize = abs(type), ncalc;
    double xx = x, alpha = 0.0;
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
#define DACE_STATIC_MAX_BESSEL_ORDER 100
    if( DACE_STATIC_MAX_BESSEL_ORDER < nb ) return -1;      // static buffer too small for requested order
    double b[DACE_STATIC_MAX_BESSEL_ORDER];
#else
    double* b = (double*) dacecalloc(nb, sizeof(double));
#endif

    if(type < 0)
        ribesl_(&xx, &alpha, &nb, &ize, b, &ncalc);
    else
        rkbesl_(&xx, &alpha, &nb, &ize, b, &ncalc);

    // discombobulate results
    if(ncalc >= 0)
    {
        // ncalc == nb means all values fully accurate (0), otherwise reduced accuracy (+1)
        ncalc = (ncalc == nb ? 0 : 1);
        for(int i = n0; i <= n1; i++)
            *(bz++) = b[abs(i)];    // for integer orders considered here, I_n = I_{-n}, and for all orders K_n = K_{-n}
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(b);
#endif

    return ncalc < 0 ? -1 : ncalc;
}
/// @endcond

/*! Compute the modified Bessel function I_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part > 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(-x)*I_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselIFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // the wrappers require a strictly positive argument
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of I_n up to order nocut involve orders n-nocut .. n+nocut
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? -2 : -1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, 1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, 1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the modified Bessel function K_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part > 0)
   \param[in] n Order of the Bessel function
   \param[in] scaled If true, the scaled Bessel function is computed (i.e. exp(x)*K_n(x))
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselKFunction(const DACEDA *ina, const int n, const bool scaled, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // the wrappers require a strictly positive argument
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of K_n up to order nocut involve orders n-nocut .. n+nocut
    const int res = ModifiedBesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, scaled ? 2 : 1, bz);
    if(res >= 0)
    {
        if(scaled)
            daceEvaluateScaledModifiedBesselFunction(ina, bz, -1.0, inc);
        else
            daceEvaluateBesselFunction(ina, bz, 1.0, -1.0, inc);
    }
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
 Compute the Bessel function J_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part > 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselJFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // the wrappers require a strictly positive argument
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of J_n up to order nocut involve orders n-nocut .. n+nocut
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, -1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*! Compute the Bessel function Y_n of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part > 0)
   \param[in] n Order of the Bessel function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceBesselYFunction(const DACEDA *ina, const int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // the wrappers require a strictly positive argument
    if(a0 <= 0.0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double bz[2*DACE_STATIC_NOMAX+1];
#else
    double* bz = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif
    // derivatives of Y_n up to order nocut involve orders n-nocut .. n+nocut
    const int res = BesselWrapper(a0, n-DACECom_t.nocut, n+DACECom_t.nocut, 1, bz);
    if(res >= 0)
        daceEvaluateBesselFunction(ina, bz, -1.0, 1.0, inc);
    else
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
    }

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(bz);
#endif
}

/*!
 Evaluate a Bessel function with coefficients bz with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing Bessel functions of orders n-nocut, ..., n+nocut
   \param[in] type Either -1.0 for normal Bessel functions, or +1.0 for modified Bessel functions.
   \param[in] ktype Either -1.0 for modified Bessel K function, or +1.0 for all other Bessel functions.
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateBesselFunction(const DACEDA *ina, const double bz[], const double type, const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // xf[i] holds the i-th Taylor coefficient C^(i)(a0)/i! built from the
    // precomputed function values of neighboring orders in bz
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;    // ktype^i/(2^i * i!)
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;

        // calculate binomial coefficients i choose j based on previously calculated i-1 choose j.
        binomial[i] = 1.0;
        for(unsigned int j = i-1; j > 0; j--)
            binomial[j] += binomial[j-1];

        // Calculate n-th derivative of Bessel function C, see http://dlmf.nist.gov/10.6
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+2*j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+2*j];
            sign *= type;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Evaluate a scaled modified Bessel function with coefficients bz with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] bz C array of 2*nocut+1 elements containing modified Bessel functions of orders n-nocut, ..., n+nocut
   \param[in] ktype Either -1.0 for scaled Bessel K function, or +1.0 for scaled Bessel I function
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateScaledModifiedBesselFunction(const DACEDA *ina, const double bz[], const double ktype, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
    double binomial[2*DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
    double* binomial = (double*) dacecalloc(2*DACECom_t.nocut+1, sizeof(double));
#endif

    // the exp() scaling doubles the number of terms per derivative, hence the
    // binomial table goes up to 2*i choose j
    xf[0] = bz[DACECom_t.nocut];
    binomial[0] = 1.0;
    double factor = 1.0;    // ktype^i/(2^i * i!)
    for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
    {
        factor *= ktype*0.5/i;

        // calculate binomial coefficients 2*i-1 choose j based on previously calculated 2*i-2 choose j.
        binomial[2*i-1] = 1.0;
        for(unsigned int j = 2*i-2; j > 0; j--)
            binomial[j] += binomial[j-1];
        // calculate binomial coefficients 2*i choose j based on previously calculated 2*i-1 choose j.
        binomial[2*i] = 1.0;
        for(unsigned int j = 2*i-1; j > 0; j--)
            binomial[j] += binomial[j-1];

        // Calculate n-th derivative of Bessel function C
        // bz contains values of C_{n-o} to C_{n+o} of constant part of ina
        double sign = 1.0, c = 0.0;
        xf[i] = 0.0;
        for(unsigned int j = 0; j <= 2*i; j++)
        {
            // use Kahan summation, since signs oscillate and magnitudes can also vary greatly
            const double y = binomial[j]*sign*bz[DACECom_t.nocut-i+j] - c;
            const double t = xf[i] + y;
            c = (t - xf[i]) - y;
            xf[i] = t;
            // in infinite precision the above is equivalent to:
            // xf[i] += binomial[j]*sign*bz[DACECom_t.nocut-i+j];
            sign *= -1.0;
        }
        xf[i] *= factor;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(binomial);
    dacefree(xf);
#endif
}

/*! Compute the partial Logarithmic Gamma function of a DA object (without constant part).
   \param[in] ina Pointer to the DA object to operate on
   \param[in] a0 Constant part
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
   \note No argument checking is performed to ensure values are within allowable range.
 */
void daceLogGammaFunction0(const DACEDA *ina, const double a0, DACEDA *inc)
{
#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    // Taylor coefficients of lnGamma about a0 without the constant term:
    // d/dx lnGamma = psi, and higher derivatives are Hurwitz zeta values,
    // giving coefficient (-1)^i * zeta(i, a0)/i for i >= 2
    xf[0] = 0.0;
    xf[1] = psi_(&a0);
    double s = 1.0;
    for(unsigned int i = 2; i < DACECom_t.nocut+1; i++)
    {
        xf[i] = (s/i)*zeta_(i, a0, NULL);
        s *= -1.0;
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Compute the Logarithmic Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceLogGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // lnGamma has poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    daceLogGammaFunction0(ina, a0, inc);
    // NOTE(review): for negative non-integer a0, Gamma(a0) can be negative, so
    // log(dgamma_(&a0)) would be NaN even though the check above passes —
    // confirm the intended domain of this routine
    daceSetCoefficient0(inc, 0, log(dgamma_(&a0)));
}

/*! Compute the Gamma function of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceGammaFunction(const DACEDA *ina, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // Gamma has poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

    // Gamma(x) = exp(lnGamma(x) - lnGamma(a0)) * Gamma(a0): the constant part
    // is applied by multiplication, which also works when Gamma(a0) < 0
    daceLogGammaFunction0(ina, a0, inc);
    daceExponential(inc, inc);
    daceMultiplyDouble(inc, dgamma_(&a0), inc);
}

/*! Compute the n-th Psi function (i.e. the n+1 derivative of the logarithmic gamma function) of a DA object.
   \param[in] ina Pointer to the DA object to operate on (constant part != 0, -1, -2, ...)
   \param[in] n Order of the Psi function (n >= 0)
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void dacePsiFunction(const DACEDA *ina, const unsigned int n, DACEDA *inc)
{
    const double a0 = daceGetConstant(ina);

    // psi functions have poles at the non-positive integers
    if(a0 <= 0.0 && trunc(a0) == a0)
    {
        daceSetError(__func__, DACE_ERROR, 50);
        daceCreateConstant(inc, 0.0);
        return;
    }

#if DACE_MEMORY_MODEL == DACE_MEMORY_STATIC
    double xf[DACE_STATIC_NOMAX+1];
#else
    double* xf = (double*) dacecalloc(DACECom_t.nocut+1, sizeof(double));
#endif

    if(n == 0)
    {
        // digamma: psi^(i)(a0)/i! = (-1)^(i+1) * zeta(i+1, a0) for i >= 1
        xf[0] = psi_(&a0);
        double s = 1.0;
        for(unsigned int i = 1; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = s*zeta_(i+1, a0, NULL);
            s *= -1.0;
        }
    }
    else
    {
        // polygamma: psi^(n)(x) = (-1)^(n+1) * n! * zeta(n+1, x); fac carries
        // the sign and factorial prefactor divided by i! for each coefficient
        double fac = (n%2 ? 1.0 : -1.0);
        for(unsigned int i = 2; i <= n; i++)
            fac *= i;
        for(unsigned int i = 0; i < DACECom_t.nocut+1; i++)
        {
            xf[i] = fac*zeta_(n+i+1, a0, NULL);
            fac = -(fac/(i+1))*(n+i+1);
        }
    }

    daceEvaluateSeries(ina, xf, inc);

#if DACE_MEMORY_MODEL != DACE_MEMORY_STATIC
    dacefree(xf);
#endif
}

/*! Evaluate a polynomial with coefficients xf with the non-constant part of ina.
   \param[in] ina Pointer to the DA object to operate on
   \param[in] xf C array of nocut+1 elements containing the coefficients of the polynomial
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is aliasing safe, i.e. inc can be the same as ina.
 */
void daceEvaluateSeries(const DACEDA *ina, const double xf[], DACEDA *inc)
{
    DACEDA inon;
    const unsigned int nocut = DACECom_t.nocut;

    // inon = non-constant part of ina
    daceAllocateDA(&inon, 0);
    daceCopy(ina, &inon);
    daceSetCoefficient0(&inon, 0, 0.0);

    // Horner scheme: since inon has no constant part, the intermediate result
    // after k steps only needs terms up to order k, so the global truncation
    // order is temporarily lowered at each step to avoid wasted work
    DACECom_t.nocut = 1;
    daceMultiplyDouble(&inon, xf[nocut], inc);
    daceAddDouble(inc, xf[nocut-1], inc);

    // evaluate series
    for(int i = nocut-2; i >= 0; i--)
    {
        DACECom_t.nocut = nocut-i;
        daceMultiply(&inon, inc, inc);
        daceAddDouble(inc, xf[i], inc);
    }

    // restore the user's truncation order before returning
    DACECom_t.nocut = nocut;
    daceFreeDA(&inon);
}

/*! Compute the weighted sum of two DA objects.
   \param[in] ina Pointer to the first DA object to operate on
   \param[in] afac Weighting factor to multiply ina by
   \param[in] inb Pointer to the second DA object to operate on
   \param[in] bfac Weighting factor to multiply inb by
   \param[out] inc Pointer to the DA object to store the result in
   \note This routine is NOT aliasing safe! So inc MUST BE DIFFERENT from ina and inb.
 */
void daceWeightedSum(const DACEDA *ina, const double afac, const DACEDA *inb, const double bfac, DACEDA *inc)
{
    monomial *ipoa; unsigned int ilma, illa;
    monomial *ipob; unsigned int ilmb, illb;
    monomial *ipoc; unsigned int ilmc, illc;

    daceVariableInformation(ina, &ipoa, &ilma, &illa);
    daceVariableInformation(inb, &ipob, &ilmb, &illb);
    daceVariableInformation(inc, &ipoc, &ilmc, &illc);

    // ina and inb store monomials sorted by index ii; merge the two sorted
    // streams (like merge sort), combining terms with equal index
    monomial *ia = ipoa, *ib = ipob, *ic = ipoc;
    monomial *const iamax = ipoa+illa, *const ibmax = ipob+illb, *const icmax = ipoc+ilmc;

    if(illa > 0 && illb > 0)
    {
        // both polynomials have coefficients, merge until one runs out
        unsigned int ja = ia->ii;
        unsigned int jb = ib->ii;
        while(true)
        {
            if(ja == jb)
            {
                // add the two terms
                if(DACECom.ieo[ja] <= DACECom_t.nocut)      // skip orders above the truncation order
                {
                    const double ccc = ia->cc*afac + ib->cc*bfac;
                    if(fabs(ccc) >= DACECom_t.eps)          // drop coefficients below the cutoff
                    {
                        if(ic >= icmax)
                        {
                            // out of space in inc
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                ib++;
                if(ia >= iamax || ib >= ibmax) break;
                ja = ia->ii;
                jb = ib->ii;
            }
            else if(ja < jb)
            {
                // store term a
                if(DACECom.ieo[ja] <= DACECom_t.nocut)
                {
                    const double ccc = ia->cc*afac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ia->ii;
                        ic++;
                    }
                }
                ia++;
                if(ia >= iamax) break;
                ja = ia->ii;
            }
            else
            {
                // store term b
                if(DACECom.ieo[jb] <= DACECom_t.nocut)
                {
                    const double ccc = ib->cc*bfac;
                    if(fabs(ccc) >= DACECom_t.eps)
                    {
                        if(ic >= icmax)
                        {
                            daceSetError(__func__, DACE_ERROR, 21);
                            daceSetLength(inc, ilmc);
                            return;
                        }
                        ic->cc = ccc;
                        ic->ii = ib->ii;
                        ic++;
                    }
                }
                ib++;
                if(ib >= ibmax) break;
                jb = ib->ii;
            }
        }
    }

    // copy any remaining terms from either ina or inb
    monomial *ismin, *ismax;
    double fac;
    if(ia < iamax)
    {
        ismin = ia;
        ismax = iamax;
        fac = afac;
    }
    else
    {
        ismin = ib;
        ismax = ibmax;
        fac = bfac;
    }
    for(monomial *is = ismin; is < ismax; is++)
    {
        if(DACECom.ieo[is->ii] <= DACECom_t.nocut)
        {
            const double ccc = is->cc*fac;
            if(fabs(ccc) >= DACECom_t.eps)
            {
                if(ic >= icmax)
                {
                    daceSetError(__func__, DACE_ERROR, 21);
                    daceSetLength(inc, ilmc);
                    return;
                }
                ic->cc = ccc;
                ic->ii = is->ii;
                ic++;
            }
        }
    }

    // record the number of monomials actually written to inc
    daceSetLength(inc, ic-ipoc);
}
/** @}*/
mixed_tentusscher_myo_epi_2004_S2_13.c
// Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_13.h"

// Report the model's resting potential and number of state variables.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Set the initial state for one cell, choosing the myocardium or epicardium
// steady state based on the per-cell mapping supplied through extra_data.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5539038753567,0.00129412448996796,0.779360292837132,0.779249119724812,0.000175123544000358,0.484940918252471,0.00294329963917416,0.999998343965409,1.93809582696782e-08,1.89468693200808e-05,0.999768982807075,1.00735744125472,0.999998089726980,3.89830144793233e-05,1.72829326331064,9.02393126594947,140.110171882154};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

// Advance every requested cell by num_steps explicit-Euler substeps of size dt,
// dispatching to the myocardium or epicardium right-hand side per the mapping.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the mapping is indexed with the loop index i here,
            // while set_model_initial_conditions_cpu indexes it with sv_id.
            // When cells_to_solve is non-NULL these differ — confirm whether
            // mapping[sv_id] was intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit time step for a myocardium cell: RHS_cpu_myo returns the
// already-updated state in rDY (Rush-Larsen style updates), which is copied back.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_myo(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY_== NULL ? rY[i] : rDY[i];
}

// TenTusscher 2004 myocardium right-hand side. Writes the NEW state after one
// step of size dt into rDY_ (gates via exact exponential integration, voltage
// and concentrations via forward Euler).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium update via the quadratic buffering (steady-state buffer) formula
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium update, same buffering scheme
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only relax (not re-activate) while depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}

// One explicit time step for an epicardium cell; same scheme as the myocardium
// variant but using RHS_cpu_epi.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu_epi(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// TenTusscher 2004 epicardium right-hand side (continues past this chunk).
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real
sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.6562217355457,0.000322374069155616,0.000173047827857537,0.000696326655746097,0.278632619847976,0.150655618402204,0.199964802834043,3.69236346658384,0.0186563054578193,2.52084572386843,1094.23899275256,0.000612998466562635,0.523532987188625,0.0168738706166519,0.00354670636620307,9.88277000414890e-06}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real 
IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); 
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = 
FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
GB_unop__bnot_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__bnot_uint64_uint64
// op(A') function:  GB_unop_tran__bnot_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = ~(aij)

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (bitwise complement)
#define GB_OP(z, x) \
    z = ~(x) ;

// casting (uint64 -> uint64, identity cast)
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */   \
    uint64_t z = aij ;               \
    Cx [pC] = ~(z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = ~(aij) elementwise over anz entries, in parallel with the
// requested number of OpenMP threads.  Cx and Ax may be aliased (in-place).
GrB_Info GB_unop_apply__bnot_uint64_uint64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        uint64_t z = aij ;
        Cx [p] = ~(z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body is supplied by the shared template, which expands the
// GB_* macros defined above.
GrB_Info GB_unop_tran__bnot_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shortcut_layer.c
#include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>

// Build a shortcut (residual-add) layer that sums the current input with the
// outputs of n earlier layers (indices in input_layers).  Optional per-feature
// or per-channel weights scale each summed source.
//   layers_output/layers_delta (+_gpu) : pointers into the source layers'
//   output/delta buffers, owned by the network.
layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c,
    float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu,
    WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalization, ACTIVATION activation, int train)
{
    fprintf(stderr, "Shortcut Layer: ");
    int i;
    for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]);

    layer l = { (LAYER_TYPE)0 };
    l.train = train;
    l.type = SHORTCUT;
    l.batch = batch;
    l.activation = activation;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    l.layers_output = layers_output;
    l.layers_delta = layers_delta;
    l.weights_type = weights_type;
    l.weights_normalization = weights_normalization;
    l.learning_rate_scale = 1;  // not necessary

    //l.w = w2;
    //l.h = h2;
    //l.c = c2;
    // Output geometry equals input geometry (element-wise addition).
    l.w = l.out_w = w;
    l.h = l.out_h = h;
    l.c = l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;

    //if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);

    l.index = l.input_layers[0];   // primary shortcut source

    if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
    l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));

    // (n + 1) weight groups: one per source layer plus one for the direct input.
    l.nweights = 0;
    if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1);
    else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c;

    if (l.nweights > 0) {
        l.weights = (float*)calloc(l.nweights, sizeof(float));
        float scale = sqrt(2. / l.nweights);
        // Weights start at 1 (identity mixing); alternative inits kept commented.
        for (i = 0; i < l.nweights; ++i) l.weights[i] = 1;// +0.01*rand_uniform(-1, 1);// scale*rand_uniform(-1, 1); // rand_normal();

        if (train) l.weight_updates = (float*)calloc(l.nweights, sizeof(float));
        l.update = update_shortcut_layer;
    }

    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
#ifndef GPU
    // CPU-only builds need a scratch buffer for SWISH/MISH gradients.
    if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)calloc(l.batch*l.outputs, sizeof(float));
#endif  // GPU

#ifdef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);

    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;

    if (l.nweights > 0) {
        l.update_gpu = update_shortcut_layer_gpu;
        l.weights_gpu = cuda_make_array(l.weights, l.nweights);
        if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
    }

    if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);

    // Device-side copies of the source-layer bookkeeping arrays.
    l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n);
    l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n);
    l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n);
#endif  // GPU

    l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.;
    if (l.weights_type) l.bflops *= 2;
    fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF\n", l.weights_type, l.weights_normalization, l.out_w, l.out_h, l.out_c, l.bflops);
    return l;
}

// Resize the layer to a new spatial size (w, h), reallocating host buffers and
// refreshing the cached pointers into each source layer (their buffers may have
// been reallocated by the network resize).
void resize_shortcut_layer(layer *l, int w, int h, network *net)
{
    //assert(l->w == l->out_w);
    //assert(l->h == l->out_h);
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));

    int i;
    for (i = 0; i < l->n; ++i) {
        int index = l->input_layers[i];
        l->input_sizes[i] = net->layers[index].outputs;
        l->layers_output[i] = net->layers[index].output;
        l->layers_delta[i] = net->layers[index].delta;

        // Shortcut sources must match the new spatial size exactly.
        assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h);
    }

    if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)realloc(l->activation_input, l->batch*l->outputs * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);

    if (l->train) {
        cuda_free(l->delta_gpu);
        l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
    }

    // Gather the sources' refreshed device pointers on the host, then push the
    // bookkeeping arrays to the existing device allocations.
    float **layers_output_gpu = (float **)calloc(l->n, sizeof(float *));
    float **layers_delta_gpu = (float **)calloc(l->n, sizeof(float *));

    for (i = 0; i < l->n; ++i) {
        const int index = l->input_layers[i];
        layers_output_gpu[i] = net->layers[index].output_gpu;
        layers_delta_gpu[i] = net->layers[index].delta_gpu;
    }

    memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int));
    memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*));
    memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*));

    free(layers_output_gpu);
    free(layers_delta_gpu);

    if (l->activation == SWISH || l->activation == MISH) {
        cuda_free(l->activation_input_gpu);
        l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs);
    }
#endif
}

// CPU forward pass: plain element-wise add in the common unweighted single-
// source same-shape case, otherwise the general weighted multi-layer sum,
// followed by the layer activation.
void forward_shortcut_layer(const layer l, network_state state)
{
    int from_w = state.net.layers[l.index].w;
    int from_h = state.net.layers[l.index].h;
    int from_c = state.net.layers[l.index].c;

    if (l.nweights == 0 && l.n == 1 && from_w == l.w && from_h == l.h && from_c == l.c) {
        // Fast path: output = input + source output.
        int size = l.batch * l.w * l.h * l.c;
        int i;
        #pragma omp parallel for
        for(i = 0; i < size; ++i)
            l.output[i] = state.input[i] + state.net.layers[l.index].output[i];
    }
    else {
        shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalization);
    }

    //copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
    //shortcut_cpu(l.batch, from_w, from_h, from_c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output);

    //activate_array(l.output, l.outputs*l.batch, l.activation);
    if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);
}

// CPU backward pass: activation gradient followed by distribution of l.delta
// to the network input delta and every source layer's delta (weighted case
// also accumulates weight updates).
void backward_shortcut_layer(const layer l, network_state state)
{
    if (l.activation == SWISH) gradient_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.delta);
    else if (l.activation == MISH) gradient_array_mish(l.outputs*l.batch, l.activation_input, l.delta);
    else gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);

    backward_shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes,
        l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalization);

    //axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
    //shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
}

// SGD update of the mixing weights (no-op when the layer is unweighted).
void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale;
        //float momentum = a.momentum;
        //float decay = a.decay;
        //int batch = a.batch;

        axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
        axpy_cpu(l.nweights, learning_rate / batch, l.weight_updates, 1, l.weights, 1);
        scal_cpu(l.nweights, momentum, l.weight_updates, 1);
    }
}

#ifdef GPU
// GPU forward pass: always uses the general multi-layer kernel, then the
// activation on device.
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
    //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    //simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
    //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);

    //-----------
    //if (l.outputs == l.input_sizes[0])
    //if(l.n == 1 && l.nweights == 0)
    //{
    //    input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c,
    //        state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //}
    //else
    {
        shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalization);
    }

    if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

// GPU backward pass: mirror of backward_shortcut_layer on device buffers.
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu,
        l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalization);

    //axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
    //shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}

// GPU SGD update of the mixing weights; sanitizes NaN/Inf and clamps the
// update magnitude before applying it.  loss_scale divides the learning rate
// (mixed-precision loss scaling).
void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay, float loss_scale)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale / loss_scale;
        //float momentum = a.momentum;
        //float decay = a.decay;
        //int batch = a.batch;

        reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
        fix_nan_and_inf(l.weights_gpu, l.nweights);

        //constrain_weight_updates_ongpu(l.nweights, 1, l.weights_gpu, l.weight_updates_gpu);
        constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1);

        /*
        cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
        cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
        CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
        for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weight_updates[i]);
        printf(" l.nweights = %d - updates \n", l.nweights);
        for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weights[i]);
        printf(" l.nweights = %d \n\n", l.nweights);
        */

        //axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        // NOTE(review): weight decay is commented out here, unlike the CPU
        // update path — confirm this asymmetry is intentional.
        axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);

        //fill_ongpu(l.nweights, 0, l.weight_updates_gpu, 1);

        //if (l.clip) {
        //    constrain_ongpu(l.nweights, l.clip, l.weights_gpu, 1);
        //}
    }
}

// Copy weights and (clamped) weight updates from device to host.
void pull_shortcut_layer(layer l)
{
    constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1);
    cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
    cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
    CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
}

// Copy host weights to the device.
void push_shortcut_layer(layer l)
{
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
}
#endif
GB_binop__cmplx_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__cmplx_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__cmplx_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__cmplx_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__cmplx_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__cmplx_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__cmplx_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__cmplx_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__cmplx_fp64)
// C=scalar+B                       GB (_bind1st__cmplx_fp64)
// C=scalar+B'                      GB (_bind1st_tran__cmplx_fp64)
// C=A+scalar                       GB (_bind2nd__cmplx_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__cmplx_fp64)

// C type:     GxB_FC64_t
// A type:     double
// A pattern?  0
// B type:     double
// B pattern?  0

// BinaryOp:   cij = GxB_CMPLX (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GxB_CMPLX (GBX (Ax, pA, A_iso), 0)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GxB_CMPLX (GBX (Bx, pB, B_iso), 0)

#define GB_CX(p) Cx [p]

// binary operator: build a double complex from real and imaginary parts
#define GB_BINOP(z,x,y,i,j) \
    z = GxB_CMPLX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_CMPLX || GxB_NO_FP64 || GxB_NO_CMPLX_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (not available for the CMPLX operator, hence disabled)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__cmplx_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__cmplx_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (entries missing from A or B).
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__cmplx_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__cmplx_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__cmplx_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__cmplx_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GxB_CMPLX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__cmplx_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GxB_CMPLX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GxB_CMPLX (x, aij) ;        \
}

// (this function continues beyond the end of this chunk)
GrB_Info GB (_bind1st_tran__cmplx_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = GxB_CMPLX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
tensor_cpu-inl.h
/*!
 * Copyright (c) 2014 by Contributors
 * \file tensor_cpu-inl.h
 * \brief implementation of CPU host code
 * \author Bing Xu, Tianqi Chen
 */
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
// CPU specializations of the device-management hooks: the host needs no
// per-device state, so these are no-ops.
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
// CPU streams carry no BLAS/DNN handles; just allocate/free the object.
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                   bool create_dnn_handle,
                                   int dev_id) {
  return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
// Print a shape as a python-style tuple, e.g. (2,3) or (5,).
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
  os << '(';
  for (int i = 0; i < ndim; ++i) {
    if (i != 0) os << ',';
    os << shape[i];
  }
  // python style tuple
  if (ndim == 1) os << ',';
  os << ')';
  return os;
}
// Raw host-memory allocation hooks, specialized per device below.
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
// Under nvcc, host memory destined for gpu tensors is pinned (portable flag).
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}
// Allocate contiguous host storage for obj (stride_ == innermost size).
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Release host storage; fatal on a second free of the same tensor.
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;
}
// Allocate tensor storage.  With pad=true each row is pitch-aligned and
// stride_ comes from the pitch; otherwise storage is fully contiguous.
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// Allocate a new tensor of the given shape and fill every element with initv.
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType>
NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}
// CPU->CPU copy; one memcpy when both sides are contiguous, otherwise
// row-by-row through the 2-D flattening (handles padded strides).
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
#pragma GCC diagnostic pop
}
// Evaluate an expression plan into dst over its 2-D flattened shape.
template<typename Saver, typename R, int dim, typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  // temp remove openmp, as default setting throttles CPU
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! -_- they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}
// code to handle SSE optimization
// Fallback engine: always evaluate through the generic plan path.
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};
// Packet-enabled engine: take the vectorized path only when both the
// expression and the destination pass the alignment check.
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};
// Assign exp to dst after static type check and runtime shape check.
// eshape[0] == 0 (statically unknown shape) skips the shape comparison.
template<typename Saver, typename R, int dim,
         typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
                   const expr::Exp<E, DType, etype> &exp) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
      ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
  Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
  Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
  CHECK(eshape[0] == 0 || eshape == dshape)
      << "Assignment: Shape of Tensors are not consistent with target, "
      << "eshape: " << eshape << " dshape:" << dshape;
  MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
                  Saver, R, dim, DType, E, etype>
      ::Map(dst->ptrself(), exp);
}
// Reduce the flattened expression over its first (row) axis, keeping the
// lowest dimension; result is scaled by `scale` before saving.
template<typename Saver, typename Reducer,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
                                const expr::Exp<E, DType, etype> &exp,
                                DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self()).FlatTo2D();
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
  CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t x = 0; x < eshape[1]; ++x) {
    DType res = splan.Eval(0, x);
    for (index_t y = 1; y < eshape[0]; ++y) {
      Reducer::Reduce(res, splan.Eval(y, x));
    }
    Saver::template Save<DType>(dplan.REval(0, x), res * scale);
  }
}
// Reduce over all axes except dimkeep by viewing the expression as a 4-D
// shape (before, kept, middle, innermost) and reducing the other three.
template<typename Saver, typename Reducer, int dimkeep,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
                                 const expr::Exp<E, DType, etype> &exp,
                                 DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0]) << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equivalent form
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t c = 0; c < pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}
// Numerically stable softmax of a 1-D energy vector (subtracts the max
// before exponentiating).
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}
// Gradient of softmax + cross-entropy: dst = src, except src-1 at the
// labelled column of each row.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}
// Label-smoothed variant: off-label columns are shifted by alpha/(C-1).
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f + alpha;
      } else {
        dst[y][x] = src[y][x] - smooth_grad;
      }
    }
  }
}
// As above, but rows whose label equals ignore_label receive zero gradient.
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f + alpha;
        } else {
          dst[y][x] = src[y][x] - smooth_grad;
        }
      }
    }
  }
}
// 3-D variants: class axis is dimension 1, labels indexed as label[y][n].
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f;
        } else {
          dst[y][x][n] = src[y][x][n];
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
        } else {
          dst[y][x][n] = src[y][x][n] - smooth_grad;
        }
      }
    }
  }
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f;
          } else {
            dst[y][x][n] = src[y][x][n];
          }
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
          } else {
            dst[y][x][n] = src[y][x][n] - smooth_grad;
          }
        }
      }
    }
  }
}
// Row-wise softmax of a 2-D tensor (delegates to the 1-D kernel per row).
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    Softmax(dst[y], energy[y]);
  }
}
// 3-D softmax over the middle (class) axis for each (y, n) pair.
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
                    const Tensor<cpu, 3, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    for (index_t n = 0; n < dst.size(2); ++n) {
      DType mmax = energy[y][0][n];
      for (index_t x = 1; x < dst.size(1); ++x) {
        if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
      }
      DType sum = DType(0.0f);
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
        sum += dst[y][x][n];
      }
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] /= sum;
      }
    }
  }
}
// Scatter-add src rows into dst rows selected by index.  With clip=true
// out-of-range indices are clamped to [0, K); otherwise they wrap modulo K.
template<bool clip, typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType>& index,
                        const Tensor<cpu, 2, DType> &src) {
  const int K = dst.shape_[0];
  for (index_t y = 0; y < index.size(0); ++y) {
    int j = index[y];
    if (clip) {
      if (j <= 0) j = 0;
      else if (j >= K) j = K - 1;
    } else {
      j %= K;
      if (j < 0) j += K;
    }
    dst[j] += src[y];
  }
}
// Scatter-add where destination rows come pre-sorted; src rows are picked
// through the companion index tensor.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}
// Copy each src row into the dst row named by index (overwrite, no add).
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    for (index_t j = 0; j < src.size(1); j++) {
      dst[index[y]][j] = src[y][j];
    }
  }
}
// Stable in-place key/value sort via an index permutation.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
      << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
      << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  for (int i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // We can sort each segment using two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}
// blas related
// dst[0] = lhs . rhs via the device BLAS engine.
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar";
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot(
      lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
// Batched dst = alpha * op(lhs) * op(rhs) + beta * dst.  workspace must hold
// at least 3 * batch_size pointers for the batched BLAS call.
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha, DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  index_t batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
      : lhs.shape_;
  Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
      : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
      << "BatchGEMM: batchsize must be equal."
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
      << "BatchGEMM: matrix shape mismatch"
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(workspace.size(0) >= 3 * batch_size)
      << "Workspace Size must be bigger than " << 3 * batch_size;
  CHECK_EQ(workspace.CheckContiguous(), true);
  // use column major argument to compatible with most BLAS
  expr::BLASEngine<Device, DType>::batched_gemm
      (dst.stream_,
       transpose_right, transpose_left,
       transpose_right ? rhs.size(1) : rhs.size(2),
       transpose_left ? lhs.size(2) : lhs.size(1),
       transpose_right ? rhs.size(2) : rhs.size(1),
       alpha,
       rhs.dptr_, rhs.stride_,
       lhs.dptr_, lhs.stride_,
       beta,
       dst.dptr_, dst.stride_, batch_size,
       workspace.dptr_);
}
}  // namespace mshadow
#endif  // MSHADOW_TENSOR_CPU_INL_H_
django_scrypt_fmt_plug.c
/* scrypt cracker patch for JtR. Hacked together during May of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_django_scrypt; #elif FMT_REGISTERS_H john_register_one(&fmt_django_scrypt); #else #include <string.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64.h" #include "escrypt/crypto_scrypt.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #define OMP_SCALE 1 // So slow a format, a multiplier is NOT needed #endif #include "memdbg.h" #define FORMAT_LABEL "django-scrypt" #define FORMAT_NAME "" #define FORMAT_TAG "scrypt" #define TAG_LENGTH 6 #ifdef __XOP__ #define ALGORITHM_NAME "Salsa20/8 128/128 XOP" #elif defined(__AVX__) #define ALGORITHM_NAME "Salsa20/8 128/128 AVX" #elif defined(__SSE2__) #define ALGORITHM_NAME "Salsa20/8 128/128 SSE2" #else #define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 4 #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 /* notastrongpassword => scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA== */ static struct fmt_tests scrypt_tests[] = { /* https://pypi.python.org/pypi/django-scrypt/ format hashes */ {"scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA==", "notastrongpassword"}, {"scrypt$Cj0PzdtT3qS2$14$8$1$64$qn4CDnM8CcIBNrpQXHo6ti8vSUoSXj7GBFy7k1bp5wPs8jKjh/gHZ+qM9uk6LbcVHm02yBaI5WCbDm/Shq/MXA==", 
"realmenuseJtR"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { /* int type; */ // not used (another type probably required a new JtR format) int N; int r; int p; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int isDigits(char *p) { while (*p && *p != '$') { if (*p <= '0' || *p >= '9') return 0; ++p; } return 1; } static int valid(char *ciphertext, struct fmt_main *self) { char *cp, *cp2; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return 0; cp = ciphertext + TAG_LENGTH; if (*cp != '$') return 0; ++cp; cp2 = strchr(cp, '$'); if (!cp2) return 0; if (cp2-cp > 32) return 0; cp = &cp2[1]; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (strlen(cp) != 88) return 0; return 1; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; /* ensure alignment */ static union { struct custom_salt _cs; ARCH_WORD_32 dummy; } un; static struct custom_salt *cs = &(un._cs); ctcopy += TAG_LENGTH; p = strtok(ctcopy, "$"); strncpy((char*)cs->salt, p, 32); p = strtok(NULL, "$"); cs->N = atoi(p); p = strtok(NULL, "$"); cs->r = atoi(p); p = strtok(NULL, "$"); cs->p = atoi(p); MEM_FREE(keeptr); return (void *)cs; } static void *get_binary(char *ciphertext) { static 
union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; p = strrchr(ciphertext, '$') + 1; base64_decode(p, strlen(p), (char*)out); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { crypto_scrypt((unsigned char*)saved_key[index], strlen((char*)saved_key[index]), cur_salt->salt, strlen((char*)cur_salt->salt), (1ULL) << cur_salt->N, cur_salt->r, cur_salt->p, (unsigned char*)crypt_out[index], BINARY_SIZE); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void scrypt_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int tunable_cost_N(void *salt) { static struct custom_salt *my_salt; my_salt 
= salt; return (unsigned int) my_salt->N; } static unsigned int tunable_cost_r(void *salt) { static struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->r; } static unsigned int tunable_cost_p(void *salt) { static struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->p; } #endif struct fmt_main fmt_django_scrypt = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "N", "r", "p" }, #endif scrypt_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { tunable_cost_N, tunable_cost_r, tunable_cost_p }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, scrypt_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
omp_parallel_for_reduction.c
// RUN: %libomp-compile-and-run
//
// Conformance test for the OpenMP `reduction` clause on `parallel for`:
// exercises +, -, * on int and double, and &&, ||, &, |, ^ on int.
// Each section computes a result under schedule(dynamic,1) (to force
// cross-thread interleaving) and compares it with a serially computed
// reference. Returns 1 (true) on success, 0 on any failure.
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

#define DOUBLE_DIGITS 20    /* dt^DOUBLE_DIGITS */
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800  /* 10! */

int test_omp_parallel_for_reduction()
{
  int sum;
  int known_sum;
  double dsum;
  double dknown_sum;
  double dt=0.5;  /* base of geometric row for + and - test*/
  double rounding_error= 1.E-9;
  int diff;
  double ddiff;
  int product;
  int known_product;
  int logic_and;
  int logic_or;
  int bit_and;
  int bit_or;
  int exclusiv_bit_or;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result;        /* failure counter; 0 means all sub-tests passed */

  sum =0;
  dsum=0;
  dt = 1./3.;        /* overrides the 0.5 initializer above */
  result = 0;
  product = 1;
  logic_and=1;
  logic_or=0;
  bit_and=1;
  bit_or=0;
  exclusiv_bit_or=0;

  /* Tests for integers: reduction(+) vs. Gauss sum 1..LOOPCOUNT */
  known_sum = (LOOPCOUNT*(LOOPCOUNT+1))/2;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:sum)
  for (i=1;i<=LOOPCOUNT;i++) {
    sum=sum+i;
  }
  if(known_sum!=sum) {
    result++;
    fprintf(stderr,"Error in sum with integers: Result was %d"
      " instead of %d\n",sum,known_sum);
  }

  /* reduction(-): start at the Gauss sum and subtract 1..LOOPCOUNT -> 0 */
  diff = (LOOPCOUNT*(LOOPCOUNT+1))/2;
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:diff)
  for (i=1;i<=LOOPCOUNT;++i) {
    diff=diff-i;
  }
  if(diff != 0) {
    result++;
    fprintf(stderr,"Error in difference with integers: Result was %d"
      " instead of 0.\n",diff);
  }

  /* Tests for doubles: geometric series sum_{i=0}^{19} dt^i,
     reference computed serially via the closed form (1-dt^20)/(1-dt) */
  dsum=0;
  dpt=1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt*=dt;
  }
  dknown_sum = (1-dpt)/(1-dt);
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(+:dsum)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dsum += pow(dt,i);
  }
  if( fabs(dsum-dknown_sum) > rounding_error ) {
    result++;
    fprintf(stderr,"Error in sum with doubles: Result was %f"
      " instead of %f (Difference: %E)\n",
      dsum, dknown_sum, dsum-dknown_sum);
  }

  dpt=1;
  for (i=0;i<DOUBLE_DIGITS;++i) {
    dpt*=dt;
  }
  /* stray newline on stderr — kept as-is to preserve the test's exact
     observable output */
  fprintf(stderr,"\n");
  ddiff = (1-dpt)/(1-dt);
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(-:ddiff)
  for (i=0;i<DOUBLE_DIGITS;++i) {
    ddiff -= pow(dt,i);
  }
  if( fabs(ddiff) > rounding_error) {
    result++;
    fprintf(stderr,"Error in Difference with doubles: Result was %E"
      " instead of 0.0\n",ddiff);
  }

  /* Tests for integers: reduction(*) vs. 10! */
  #pragma omp parallel for schedule(dynamic,1) private(i) reduction(*:product)
  for(i=1;i<=MAX_FACTOR;i++) {
    product *= i;
  }
  known_product = KNOWN_PRODUCT;
  if(known_product != product) {
    result++;
    fprintf(stderr,"Error in Product with integers: Result was %d"
      " instead of %d\n\n",product,known_product);
  }

  /* Tests for logic AND: all-ones array must reduce to true ... */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=1;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(&&:logic_and)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_and = (logic_and && logics[i]);
  }
  if(!logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 1.\n");
  }

  /* ... and a single zero in the middle must flip it to false */
  logic_and = 1;
  logics[LOOPCOUNT/2]=0;
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(&&:logic_and)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_and = logic_and && logics[i];
  }
  if(logic_and) {
    result++;
    fprintf(stderr,"Error in logic AND part 2.\n");
  }

  /* Tests for logic OR: all-zero array stays false ... */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(||:logic_or)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if(logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 1.\n");
  }

  /* ... and a single one must flip it to true */
  logic_or = 0;
  logics[LOOPCOUNT/2]=1;
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(||:logic_or)
  for(i=0;i<LOOPCOUNT;++i) {
    logic_or = logic_or || logics[i];
  }
  if(!logic_or) {
    result++;
    fprintf(stderr,"Error in logic OR part 2.\n");
  }

  /* Tests for bitwise AND: same two-phase scheme as logic AND */
  for(i=0;i<LOOPCOUNT;++i) {
    logics[i]=1;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(&:bit_and)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_and = (bit_and & logics[i]);
  }
  if(!bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 1.\n");
  }

  bit_and = 1;
  logics[LOOPCOUNT/2]=0;
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(&:bit_and)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_and = bit_and & logics[i];
  }
  if(bit_and) {
    result++;
    fprintf(stderr,"Error in BIT AND part 2.\n");
  }

  /* Tests for bitwise OR */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(|:bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if(bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 1\n");
  }

  bit_or = 0;
  logics[LOOPCOUNT/2]=1;
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(|:bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    bit_or = bit_or | logics[i];
  }
  if(!bit_or) {
    result++;
    fprintf(stderr,"Error in BIT OR part 2\n");
  }

  /* Tests for bitwise XOR: all zeros except one -> XOR of all is 1 */
  for(i=0;i<LOOPCOUNT;i++) {
    logics[i]=0;
  }
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(^:exclusiv_bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if(exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
  }

  exclusiv_bit_or = 0;
  logics[LOOPCOUNT/2]=1;
  #pragma omp parallel for schedule(dynamic,1) private(i) \
    reduction(^:exclusiv_bit_or)
  for(i=0;i<LOOPCOUNT;++i) {
    exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
  }
  if(!exclusiv_bit_or) {
    result++;
    fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
  }
  /*printf("\nResult:%d\n",result);*/
  return (result==0);
}

/* Run the whole suite REPETITIONS times; exit status is the number of
 * failed repetitions (0 == success, the lit-runner convention). */
int main()
{
  int i;
  int num_failed=0;

  for(i = 0; i < REPETITIONS; i++) {
    if(!test_omp_parallel_for_reduction()) {
      num_failed++;
    }
  }
  return num_failed;
}
GB_binop__times_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Comments below annotate the generated output; any real change belongs in
// the generator scripts.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__times_fc32
// A.*B function (eWiseMult):       GB_AemultB__times_fc32
// A*D function (colscale):         GB_AxD__times_fc32
// D*A function (rowscale):         GB_DxB__times_fc32
// C+=B function (dense accum):     GB_Cdense_accumB__times_fc32
// C+=b function (dense accum):     GB_Cdense_accumb__times_fc32
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__times_fc32
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__times_fc32
// C=scalar+B                       GB_bind1st__times_fc32
// C=scalar+B'                      GB_bind1st_tran__times_fc32
// C=A+scalar                       GB_bind2nd__times_fc32
// C=A'+scalar                      GB_bind2nd_tran__times_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_mul (aij, bij)

// These macros parameterize the shared template .c files included by the
// kernels below.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_FC32_mul (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Auto-generated kernel: this variant returns void and has no GB_DISABLE
// guard (the generator emits it unconditionally for accumulating ops).
void GB_Cdense_ewise3_accum__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__times_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the generator emits a second return after the block
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__times_fc32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_FC32_mul (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__times_fc32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_FC32_mul (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    GxB_FC32_t aij = Ax [pA] ;          \
    Cx [pC] = GB_FC32_mul (x, aij) ;    \
}

GrB_Info GB_bind1st_tran__times_fc32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    GxB_FC32_t aij = Ax [pA] ;          \
    Cx [pC] = GB_FC32_mul (aij, y) ;    \
}

GrB_Info GB_bind2nd_tran__times_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif   // !GBCOMPACT
hybrid_whereami.c
/* Program hybrid_whereami reports the mask for each OMP thread for each MPI process, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid mpi_report_mask(): in pure MPI region to report MPI process masks hybrid_report_mask(): in OpenMP parallel region to report thread masks map_to_cpuid( cpuid ): sets thread affinity to cpu_id (see /proc/cpuinfo, or hwloc) load_cpu_nsec(nsec): loads the cpu for nsec (default 10) hybrid_whereami.c is a driver for: 1.) Get line arguments (optional): help or number of seconds for load 2.) Start MPI Affinity for MPI processes can be reset here. mpi_report_mask() reports MPI process masks 3.) Start OpenMP parallel region hybrid_report_mask() reports masks for each thread of each MPI process. 4.) Set a work load on each thread 5.) Finish parallel region 6.) Stop MPI Kent Milfeld 12/16/15 Update to separate require a single call for OpenMP hybrid. Uses multi-threaded MPI initialization Kent Milfeld 2015/07/13 */ #include <stdio.h> #include <omp.h> #include <mpi.h> #include <unistd.h> #include <stdlib.h> void load_cpu_nsec(int nsec); void hybrid_report_mask(void); int map_to_cpuid( int icore); int main(int argc, char **argv){ int rank, nranks; // MPI variables. 
int nthrds, thrd, cpuid; //Thread info int requested=MPI_THREAD_MULTIPLE, provided; int nsec = 10; // Load, default time int ierr; // Error number cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line // thread safe init replaces MPI_Init(&argc, &argv); MPI_Init_thread(&argc, &argv, requested, &provided); MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); mpi_report_mask(); // Report JUST MPI process masks #pragma omp parallel private(thrd,nthrds,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // cpuid = thrd; // set cpuid to thread number (thrd) // ierr = map_to_cpuid( cpuid ); // set your own affinity here hybrid_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } MPI_Finalize(); }
dataset.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>

#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;

/*!
* \brief This class is used to store some meta(non-feature) data for training data,
* e.g. labels, weights, initial scores, query level informations.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] )
* 4. Query Weights, auto calculated by weights and query_boundaries (if both of them exist);
*    the weight for i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1])
*      / (query_boundaries[i + 1] - query_boundaries[i])
* 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0.
*/
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level informations, since it is needed for sampling data
  * \param data_filename Filename of data
  */
  void Init(const char* data_filename);
  /*!
  * \brief init as subset
  * \param metadata Metadata to take the subset from
  * \param used_indices Indices of the subset records
  * \param num_used_indices Number of entries in used_indices
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initial with binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exists
  * \param query_idx Index of query id column, < 0 means doesn't exists
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of local used
  */
  void PartitionLabel(const std::vector<data_size_t, mi_stl_allocator<data_size_t>>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if need
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data, const std::vector<data_size_t, mi_stl_allocator<data_size_t>>& used_data_indices);

  /*! \brief Bulk setters: copy len entries from the caller's buffer */
  void SetLabel(const label_t* label, data_size_t len);

  void SetWeights(const label_t* weights, data_size_t len);

  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  */
  void SetInitScore(const double* init_score, data_size_t len);

  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }

  /*!
  * \brief Set Weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }

  /*!
  * \brief Set Query Id for one record
  * \param idx Index of this record
  * \param value Query Id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }

  /*!
  * \brief Get weights, if not exists, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries, if not exists, will return nullptr
  *        we assume data will order by query,
  *        the interval of [query_boundaries[i], query_boundaries[i+1])
  *        is the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get Number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries, if not exists, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores, if not exists, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore();
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t, mi_stl_allocator<label_t>> label_;
  /*! \brief Weights data */
  std::vector<label_t, mi_stl_allocator<label_t>> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t, mi_stl_allocator<data_size_t>> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t, mi_stl_allocator<label_t>> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of Initial score, used to check correct weight file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double, mi_stl_allocator<double>> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t, mi_stl_allocator<data_size_t>> queries_;
  /*! \brief mutex for threading safe call */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool query_load_from_file_;
  bool init_score_load_from_file_;
};


/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}

  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>, mi_stl_allocator<std::pair<int, double>>>* out_features,
                            double* out_label) const = 0;

  virtual int NumFeatures() const = 0;

  /*!
  * \brief Create an object of parser, will auto choose the format depending on the file
  * \param filename One Filename of data
  * \param header whether the file has a header line
  * \param num_features Pass num_features of this data file if you know, <=0 means don't know
  * \param label_idx index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};

/*! \brief Shared state for histogram construction across training threads:
 *         layout flags, the multi-value bin, and an aligned histogram buffer. */
struct TrainingShareStates {
  int num_threads = 0;
  bool is_colwise = true;
  bool is_use_subcol = false;   // using a subset of columns of the multi-val bin
  bool is_use_subrow = false;   // using a subset of rows (bagging)
  bool is_subrow_copied = false;
  bool is_constant_hessian = true;
  const data_size_t* bagging_use_indices;
  data_size_t bagging_indices_cnt;
  int num_bin_aligned;          // total bins rounded up to kAlignedSize
  std::unique_ptr<MultiValBin> multi_val_bin;
  std::unique_ptr<MultiValBin> multi_val_bin_subset;
  std::vector<uint32_t, mi_stl_allocator<uint32_t>> hist_move_src;
  std::vector<uint32_t, mi_stl_allocator<uint32_t>> hist_move_dest;
  std::vector<uint32_t, mi_stl_allocator<uint32_t>> hist_move_size;
  std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>> hist_buf;

  /*! \brief Take ownership of bin and size hist_buf to hold one
   *         (gradient,hessian) pair per aligned bin per thread. */
  void SetMultiValBin(MultiValBin* bin) {
    num_threads = OMP_NUM_THREADS();
    if (bin == nullptr) {
      return;
    }
    multi_val_bin.reset(bin);
    num_bin_aligned = (bin->num_bin() + kAlignedSize - 1) / kAlignedSize * kAlignedSize;
    size_t new_size = static_cast<size_t>(num_bin_aligned) * 2 * num_threads;
    if (new_size > hist_buf.size()) {
      hist_buf.resize(static_cast<size_t>(num_bin_aligned) * 2 * num_threads);
    }
  }

  /*! \brief Scratch area (tail of hist_buf) used only in sub-column mode. */
  hist_t* TempBuf() {
    if (!is_use_subcol) {
      return nullptr;
    }
    return hist_buf.data() + hist_buf.size() - num_bin_aligned * 2;
  }

  /*! \brief Scatter histogram chunks from the packed sub-column layout back
   *         to their destination offsets; no-op unless in sub-column mode. */
  void HistMove(const hist_t* src, hist_t* dest) {
    if (!is_use_subcol) {
      return;
    }
#pragma omp parallel for schedule(static)
    for (int i = 0; i < static_cast<int>(hist_move_src.size()); ++i) {
      std::copy_n(src + hist_move_src[i], hist_move_size[i],
                  dest + hist_move_dest[i]);
    }
  }
};

/*!
\brief The main class of data set,
*  which are used to training or validation
*/
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  /*!
  * \brief Build the internal feature-group storage from per-feature bin
  *        mappers and the column samples collected during data loading.
  * \param bin_mappers One bin mapper per total feature (ownership moved in)
  * \param num_total_features Number of raw (pre-filtering) features
  * \param forced_bins User-forced bin upper bounds, per feature
  * \param sample_non_zero_indices Row indices of sampled non-zero values, per column
  * \param sample_values Sampled values, per column
  * \param num_per_col Number of sampled values in each column
  * \param num_sample_col Number of sampled columns
  * \param total_sample_cnt Total number of sampled rows
  * \param io_config Configuration (max_bin, min_data_in_bin, ...)
  */
  void Construct(
      std::vector<std::unique_ptr<BinMapper>, mi_stl_allocator<std::unique_ptr<BinMapper>>> *bin_mappers,
      int num_total_features,
      const std::vector<std::vector<double, mi_stl_allocator<double>>, mi_stl_allocator<std::vector<double, mi_stl_allocator<double>>>> &forced_bins,
      int **sample_non_zero_indices,
      double **sample_values,
      const int *num_per_col,
      int num_sample_col,
      size_t total_sample_cnt,
      const Config &io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*!
  * \brief Check whether two datasets share an identical feature/bin layout,
  *        i.e. histograms built from one are compatible with the other.
  */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset &other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*!
  * \brief Push explicit zeros for the features of this row that were not
  *        pushed by the sparse PushOneRow overload. No-op once loading is done.
  */
  inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool, mi_stl_allocator<bool>> &is_feature_added) {
    if (is_finish_load_) {
      return;
    }
    for (auto fidx : feature_need_push_zeros_) {
      if (is_feature_added[fidx]) {
        continue;
      }
      const int group = feature2group_[fidx];
      const int sub_feature = feature2subfeature_[fidx];
      feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f);
    }
  }

  /*!
  * \brief Push one dense row of raw feature values into the bin storage.
  *        Values for features filtered out during loading (mapped to -1) are
  *        skipped. No-op once loading is done.
  */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double, mi_stl_allocator<double>> &feature_values) {
    if (is_finish_load_) {
      return;
    }
    // Guard against rows longer than the known feature count.
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*!
  * \brief Push one sparse row given as (raw feature index, value) pairs.
  *        Features not present in the pairs are back-filled with zeros via
  *        FinishOneRow. No-op once loading is done.
  */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>, mi_stl_allocator<std::pair<int, double>>> &feature_values) {
    if (is_finish_load_) {
      return;
    }
    std::vector<bool, mi_stl_allocator<bool>> is_feature_added(num_features_, false);
    for (auto &inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) {
        continue;  // ignore out-of-range raw indices
      }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        is_feature_added[feature_idx] = true;
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
    FinishOneRow(tid, row_idx, is_feature_added);
  }

  /*! \brief Push a single value directly by group / sub-feature address. */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map inner (used) feature index back to its raw column index. */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map raw column index to inner feature index (-1 if unused). */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }

  /*! \brief Feature group that holds the given inner feature. */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }

  /*! \brief Sub-feature index within its group.
  *   NOTE(review): name typo ("Feture") kept — it is part of the public API. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }

  /*! \brief First global bin index of the given group. */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }

  /*! \brief Total number of bins over all groups. */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Raw column indices of all features that survived filtering. */
  inline std::vector<int, mi_stl_allocator<int>> ValidFeatureIndices() const {
    std::vector<int, mi_stl_allocator<int>> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubrow(const Dataset *fullset, const data_size_t *used_indices, data_size_t num_used_indices, bool need_meta_data);

  MultiValBin *GetMultiBinFromSparseFeatures() const;

  MultiValBin *GetMultiBinFromAllFeatures() const;

  /*!
  * \brief Decide the histogram-construction strategy (column-wise vs
  *        row-wise) and build the shared training state for it.
  */
  TrainingShareStates *GetShareStates(
      score_t *gradients, score_t *hessians,
      const std::vector<int8_t, mi_stl_allocator<int8_t>> &is_feature_used,
      bool is_constant_hessian,
      bool force_colwise, bool force_rowwise) const;

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char *field_name, const float *field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char *field_name, const double *field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char *field_name, const int *field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char *field_name, data_size_t *out_len, const float **out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char *field_name, data_size_t *out_len, const double **out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char *field_name, data_size_t *out_len, const int **out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char *bin_filename);

  LIGHTGBM_EXPORT void DumpTextFile(const char *text_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset *dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset *dataset);

  void InitTrain(const std::vector<int8_t, mi_stl_allocator<int8_t>> &is_feature_used, TrainingShareStates *share_state) const;

  template <bool USE_INDICES, bool USE_HESSIAN>
  void ConstructHistogramsInner(const std::vector<int8_t, mi_stl_allocator<int8_t>> &is_feature_used,
                                const data_size_t *data_indices, data_size_t num_data,
                                const score_t *gradients, const score_t *hessians,
                                score_t *ordered_gradients, score_t *ordered_hessians,
                                TrainingShareStates *share_state, hist_t *hist_data) const;

  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistogramsMultiVal(const data_size_t *data_indices, data_size_t num_data,
                                   const score_t *gradients, const score_t *hessians,
                                   TrainingShareStates *share_state, hist_t *hist_data) const;

  /*!
  * \brief Build gradient/hessian histograms for the given data subset.
  *        Dispatches to the compile-time specialization matching whether a
  *        row-index subset is used and whether the hessian is constant.
  */
  inline void ConstructHistograms(
      const std::vector<int8_t, mi_stl_allocator<int8_t>> &is_feature_used,
      const data_size_t *data_indices, data_size_t num_data,
      const score_t *gradients, const score_t *hessians,
      score_t *ordered_gradients, score_t *ordered_hessians,
      TrainingShareStates *share_state, hist_t *hist_data) const {
    if (num_data <= 0) {
      return;
    }
    // A proper subset of rows requires indirect (ordered) access.
    bool use_indices = data_indices != nullptr && (num_data < num_data_);
    if (share_state->is_constant_hessian) {
      if (use_indices) {
        ConstructHistogramsInner<true, false>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      } else {
        ConstructHistogramsInner<false, false>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      }
    } else {
      if (use_indices) {
        ConstructHistogramsInner<true, true>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      } else {
        ConstructHistogramsInner<false, true>(
            is_feature_used, data_indices, num_data, gradients, hessians,
            ordered_gradients, ordered_hessians, share_state, hist_data);
      }
    }
  }

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, hist_t *data) const;

  /*!
  * \brief Partition data_indices by a threshold on one feature.
  * \return Number of indices written to lte_indices (rest go to gt_indices)
  */
  inline data_size_t Split(int feature, const uint32_t *threshold, int num_threshold, bool default_left,
                           const data_size_t *data_indices, data_size_t cnt,
                           data_size_t *lte_indices, data_size_t *gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left,
                                         data_indices, cnt, lte_indices, gt_indices);
  }

  /*! \brief Bin offset inside a group: the first sub-feature reserves bin 0. */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  /*! \brief Number of bins of one inner feature. */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Total number of bins of one feature group. */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  /*! \brief Bin mapper (bin<->value translation) of one inner feature. */
  inline const BinMapper *FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Raw bin storage of one feature group. */
  inline const Bin *FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Iterator over the bins of one inner feature. */
  inline BinIterator *FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  /*! \brief Iterator over a whole feature group. */
  inline BinIterator *FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Whether the group stores multi-value (sparse) rows. */
  inline bool IsMultiGroup(int i) const {
    return feature_groups_[i]->is_multi_val_;
  }

  /*! \brief Convert a bin index of feature i back to a real value. */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata &metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_; }

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string, mi_stl_allocator<std::string>> &feature_names() const { return feature_names_; }

  /*!
  * \brief Set feature names; validates count, JSON-safety and uniqueness,
  *        and replaces whitespace in names with underscores.
  */
  inline void set_feature_names(const std::vector<std::string, mi_stl_allocator<std::string>> &feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string, mi_stl_allocator<std::string>>(feature_names);
    std::unordered_set<std::string> feature_name_set;
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto &feature_name : feature_names_) {
      // check json
      if (!Common::CheckAllowedJSON(feature_name)) {
        Log::Fatal("Do not support special JSON characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
      if (feature_name_set.count(feature_name) > 0) {
        Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
      }
      feature_name_set.insert(feature_name);
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-raw-feature bin descriptions ("none" for unused features). */
  inline std::vector<std::string, mi_stl_allocator<std::string>> feature_infos() const {
    std::vector<std::string, mi_stl_allocator<std::string>> bufs;
    for (int i = 0; i < num_total_features_; ++i) {
      int fidx = used_feature_map_[i];
      if (fidx < 0) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info_string());
      }
    }
    return bufs;
  }

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset &operator=(const Dataset &) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset &) = delete;

  void AddFeaturesFrom(Dataset *other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>, mi_stl_allocator<std::unique_ptr<FeatureGroup>>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int, mi_stl_allocator<int>> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief store feature names */
  std::vector<std::string, mi_stl_allocator<std::string>> feature_names_;
  /*! \brief magic token identifying the binary dataset file format */
  static const char *binary_file_token;
  int num_groups_;
  std::vector<int, mi_stl_allocator<int>> real_feature_idx_;
  std::vector<int, mi_stl_allocator<int>> feature2group_;
  std::vector<int, mi_stl_allocator<int>> feature2subfeature_;
  std::vector<uint64_t, mi_stl_allocator<uint64_t>> group_bin_boundaries_;
  std::vector<int, mi_stl_allocator<int>> group_feature_start_;
  std::vector<int, mi_stl_allocator<int>> group_feature_cnt_;
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t, mi_stl_allocator<int32_t>> max_bin_by_feature_;
  std::vector<std::vector<double, mi_stl_allocator<double>>, mi_stl_allocator<std::vector<double, mi_stl_allocator<double>>>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
  std::vector<int, mi_stl_allocator<int>> feature_need_push_zeros_;
};

}  // namespace LightGBM

#endif  // LightGBM_DATA_H_
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/color-private.h" #include "magick/cache.h" #include "magick/draw.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/resource_.h" #include "magick/transform.h" #include "magick/signature-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/option.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
/*
  Internal state carried between ResamplePixelColor() calls: the source image
  view, the current elliptical sampling area, and the filter weighting data.
*/
struct _ResampleFilter
{
  CacheView
    *view;          /* virtual-pixel view of the image being resampled */

  Image
    *image;

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  InterpolatePixelMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterTypes
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,
    do_interpolate,
    average_defined;

  MagickPixelPacket
    average_pixel;

  /* current elliptical area being resampled around center point */
  double
    A, B, C,                    /* ellipse quadratic coefficients */
    Vlimit, Ulimit, Uwidth,     /* bounding parallelogram extents */
    slope;

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs do to a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on a Elliptical Weighted Average, where the pixels
%  found in a large elliptical area is averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of Texture
%  Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
%  1989.  Available for free from, http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that information
%  between individual image resampling.
%
%  This function will make the appropriate AcquireVirtualCacheView() calls
%  to view the image, calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter, 1.0);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%      ResampleFilter *AcquireResampleFilter(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* allocate and zero the filter state; fatal on allocation failure */
  resample_filter=(ResampleFilter *) AcquireMagickMemory(
    sizeof(*resample_filter));
  if (resample_filter == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter));

  /* take a reference on the image and open a virtual-pixel cache view;
     both are released in DestroyResampleFilter() */
  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);

  resample_filter->debug=IsEventLogging();
  resample_filter->signature=MagickCoreSignature;

  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined = MagickFalse;

  /* initialise the resampling filter settings from the image defaults */
  SetResampleFilter(resample_filter, image->filter, image->blur);
  (void) SetResampleFilterInterpolateMethod(resample_filter,
    image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResampleFilter() finalizes and cleans up the resampling
%  resample_filter as returned by AcquireResampleFilter(), freeing any memory
%  or other information as needed.
%
%  The format of the DestroyResampleFilter method is:
%
%      ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* release the cache view and the image reference acquired above */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invert the signature so stale pointers fail the asserts above */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e P i x e l C o l o r                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResamplePixelColor() samples the pixel values surrounding the location
%  given using an elliptical weighted average, at the scale previously
%  calculated, and in the most efficent manner possible for the
%  VirtualPixelMethod setting.
%
%  The format of the ResamplePixelColor method is:
%
%     MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
%       const double u0,const double v0,MagickPixelPacket *pixel)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o u0,v0: A double representing the center of the area to resample,
%        The distortion transformed transformed x,y coordinate.
%
%    o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  MagickPixelPacket *pixel)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const PixelPacket *pixels;
  register const IndexPacket *indexes;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetMagickPixelPacket(resample_filter->image,pixel); */

  /* direct interpolation mode: skip EWA entirely */
  if ( resample_filter->do_interpolate ) {
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does resample area Miss the image Proper?
    If and that area a simple solid color - then simply return that color!
    This saves a lot of calculation when resampling outside the bounds of
    the source image.

    However it probably should be expanded to image bounds plus the filters
    scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case ConstantVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      /* whole ellipse outside the image -> uniform virtual-pixel color */
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* only the four diagonal corner regions are a single solid color */
      if ( ( u0 + resample_filter->Ulimit < 0.0
             && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if ( v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if ( u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* dither extends the image by a 32-pixel apron on every side */
      if ( ( u0 + resample_filter->Ulimit < -32.0
             && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color
     * just return a single lookup color.
     *
     * Should this return the users requested interpolated color?
     */
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /* This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there a better way? */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image:
             resize it to a single pixel with a box filter */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,
            &average_image->exception);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is a alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                resample_filter->image->background_color.opacity;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;

  /*
    Determine the parellelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounding by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width accross the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels,  within the scaled ellipse,
    bound by a Parellelogram fitted to the ellipse.

    The ellipse quotient Q is evaluated incrementally along each scan line
    using forward differences (DQ, DDQ) of the quadratic form.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
      weight = 0;
      /* NOTE: the "if (...) {" below is opened inside this #if and its
         closing "}" lives in the DEBUG_HIT_MISS conditional further down;
         the brace structure depends on the preprocessor configuration. */
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif

        /* opacity is accumulated against its own divisor since color
           channels are additionally weighted by per-pixel alpha below */
        if (pixel->matte != MagickFalse)
          pixel->opacity  += weight*pixels->opacity;
        divisor_m += weight;

        if (pixel->matte != MagickFalse)
          weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
        pixel->red   += weight*pixels->red;
        pixel->green += weight*pixels->green;
        pixel->blue  += weight*pixels->blue;
        if (pixel->colorspace == CMYKColorspace)
          pixel->index += weight*(*indexes);
        divisor_c += weight;

        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels++;
      indexes++;
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finialize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->matte != MagickFalse)
    pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
  return(MagickTrue);
}

#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
-   C l a m p U p A x e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampUpAxes() function converts the input vectors into a major and
%  minor axis unit vectors, and their magnitude.  This allows us to
%  ensure that the ellipse generated is never smaller than the unit
%  circle and thus never too small for use in EWA resampling.
%
%  This purely mathematical 'magic' was provided by Professor Nicolas
%  Robidoux and his Masters student Chantal Racette.
%
%  Reference: "We Recommend Singular Value Decomposition", David Austin
%    http://www.ams.org/samplings/feature-column/fcarc-svd
%
%  By generating major and minor axis vectors, we can actually use the
%  ellipse in its "canonical form", by remapping the dx,dy of the
%  sampled point into distances along the major and minor axis unit
%  vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/

/*
 * ClampUpAxes():
 *
 * Takes the input 2x2 inverse-Jacobian matrix
 *
 *   Jinv = [ a b ] = [ dux duy ]
 *          [ c d ]   [ dvx dvy ]
 *
 * and computes the major and minor half-axis lengths (major_mag, minor_mag)
 * and unit direction vectors ([major_unit_x,major_unit_y] and
 * [minor_unit_x,minor_unit_y]) of the smallest ellipse that contains both
 * the unit disk and the image of the unit disk under Jinv.  This guarantees
 * the EWA sampling ellipse is never smaller than the unit circle (a pixel),
 * and thus never too small to sample properly.
 *
 * Method: compute the singular values and unit LEFT singular vectors of
 * Jinv (via the eigen-decomposition of n = Jinv * transpose(Jinv)), clamp
 * the singular values up to 1, and scale the left singular vectors by the
 * clamped values.  The right factor V of the SVD is never needed: it is
 * orthogonal, so it maps the unit disk to itself and does not affect the
 * final ellipse.
 *
 * The squared distance from the ellipse center of a point [s,t] (input-space
 * offset) is then
 *
 *   ( (s*major_unit_x + t*major_unit_y) / major_mag )^2
 *     + ( (s*minor_unit_x + t*minor_unit_y) / minor_mag )^2
 *
 * To re-pack the result as tangent vectors:
 *   newdux = major_mag*major_unit_x;  newdvx = major_mag*major_unit_y;
 *   newduy = minor_mag*minor_unit_x;  newdvy = minor_mag*minor_unit_y;
 *
 * Written by Nicolas Robidoux and Chantal Racette of Laurentian University,
 * with suggestions from Anthony Thyssen and funding from NSERC; the clamping
 * idea follows A. Gustafsson's "Interactive Image Warping" (1993, Sec. 3.6)
 * and Craig DeForest's PDL::Transform.  SVD background: "We Recommend
 * Singular Value Decomposition", David Austin,
 * http://www.ams.org/samplings/feature-column/fcarc-svd
 */
static inline void ClampUpAxes(const double dux,
			       const double dvx,
			       const double duy,
			       const double dvy,
			       double *major_mag,
			       double *minor_mag,
			       double *major_unit_x,
			       double *major_unit_y,
			       double *minor_unit_x,
			       double *minor_unit_y)
{
  const double a = dux;
  const double b = duy;
  const double c = dvx;
  const double d = dvy;
  /*
   * n = Jinv * transpose(Jinv).  The eigenvalues of n are the squares of
   * the singular values of Jinv, and its eigenvectors are the left
   * singular vectors of Jinv.
   */
  const double aa = a*a;
  const double bb = b*b;
  const double cc = c*c;
  const double dd = d*d;
  const double n11 = aa+bb;
  const double n12 = a*c+b*d;
  const double n21 = n12;
  const double n22 = cc+dd;
  const double det = a*d-b*c;
  const double twice_det = det+det;
  const double frobenius_squared = n11+n22;
  const double discriminant =
    (frobenius_squared+twice_det)*(frobenius_squared-twice_det);
  /*
   * In exact arithmetic the discriminant cannot be negative.  In floating
   * point it can, because of the bad conditioning of SVDs computed through
   * the normal matrix, so clamp it at zero before taking the square root.
   */
  const double sqrt_discriminant =
    sqrt(discriminant > 0.0 ? discriminant : 0.0);
  /*
   * s1s1/s2s2 are the squares of the largest/smallest singular values of
   * Jinv (only the squares are needed until the final clamping step).
   * If s1 = 0, both singular values are 0 and any orthogonal pair of
   * factors gives a valid decomposition.
   */
  const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
  const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
  const double s1s1minusn11 = s1s1-n11;
  const double s1s1minusn22 = s1s1-n22;
  /*
   * u1 = [u11,u21], the first column of U, is a (non-normalized) left
   * singular vector for s1: an eigenvector of n for eigenvalue s1^2.
   * Select the larger row of n - s1^2 I to compute it.  If both rows
   * vanish, n - s1^2 I is the zero matrix, every vector is an eigenvector,
   * and norm below is 0 (in exact arithmetic the only such case), so the
   * arbitrary fallback [1,0] safely covers all degenerate cases.
   */
  const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
  const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
  const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared)
			    ? n12 : s1s1minusn22 );
  const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared)
			    ? s1s1minusn11 : n21 );
  const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
  /* Normalized first left singular vector (largest singular value). */
  const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
  const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
  /* Clamp the singular values up to 1 (ellipse never smaller than unit disk). */
  *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
  *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
  /* Minor axis is the major axis rotated 90 degrees counterclockwise. */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e R e s a m p l e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors.  This is not an
%  orthogonal scaling, but two distorted scaling vectors, to allow the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used, the center of the ellipse
%  must be the center of the lookup.  That is, any curvature the distortion
%  may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives.  For example you could use dr,da/r
%  polar coordinate vector scaling vectors.
%
%  If   u,v = DistortEquation(x,y)   OR   u = Fu(x,y); v = Fv(x,y)
%  then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx     and     du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0   and   du/dy = 0
%  producing an orthogonally aligned ellipse in source space for the area to
%  be resampled.
%
%  Note that scaling vectors differ from argument order.  Argument order is
%  the general order the derivatives are extracted from the distortion
%  equations, and not the scaling vectors.  As such the middle two values
%  may be swapped from what you expect.  Caution is advised.
%
%  WARNING: It is assumed that any SetResampleFilter() method call will
%  always be performed before the ScaleResampleFilter() method, so that the
%  size of the ellipse will match the support for the resampling filter being
%  used.
%
%  The format of the ScaleResampleFilter method is:
%
%     void ScaleResampleFilter(const ResampleFilter *resample_filter,
%       const double dux,const double duy,const double dvx,const double dvy)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resampling information defining the image being
%      resampled
%
%    o dux,duy,dvx,dvy: the derivatives or scaling vectors defining the EWA
%      ellipse.  NOTE: watch the order, which is based on the order
%      derivatives are usually determined from distortion equations (see
%      above).  The middle two values may need to be swapped if you are
%      thinking in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return;  /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /*
    Find ellipse coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
    with u,v relative to the point around which we are resampling, given
    the scaling vectors du/dx,dv/dx and du/dy,dv/dy in u,v space.
  */
#if EWA
  /*
    Direct conversion of derivatives into elliptical coefficients.
    However when magnifying images, the scaling vectors will be small,
    resulting in an ellipse that is too small to sample properly.  As such
    we need to clamp the major/minor axes to a minimum of 1.0 to prevent
    it getting too small.
  */
#if EWA_CLAMP
  {
    double major_mag, minor_mag, major_x, major_y, minor_x, minor_y;

    /* Clamp the ellipse so it always contains at least the unit disk. */
    ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
      &major_x, &major_y, &minor_x, &minor_y);
    major_x *= major_mag;  major_y *= major_mag;
    minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
    (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
      major_x, major_y, minor_x, minor_y);
#endif
    /* Rebuild the implicit-conic coefficients from the clamped axes. */
    A = major_y*major_y+minor_y*minor_y;
    B = -2.0*(major_x*major_y+minor_x*minor_y);
    C = major_x*major_x+minor_x*minor_x;
    F = major_mag*minor_mag;
    F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */

#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 of
    his thesis, which adds a unit circle to the elliptical area so as to do
    both reconstruction and prefiltering of the pixels in the resampling.
    It also means it is always likely to have at least 4 pixels within the
    area of the ellipse, for weighted averaging.  No scaling will result
    with F == 4.0 and a circle of radius 2.0, and F smaller than this means
    magnification is being used.

    NOTE: This method produces a very blurry result at near unity scale
    while producing perfect results for strong minification and
    magnification.  However filter support is fixed to 2.0 (no good for
    windowed sinc filters).
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /*
    Figure out various information about the ellipse directly.  This
    information is not currently needed, but may be needed later for better
    limit determination.  It is also good to have as a record for future
    debugging.
  */
  {
    double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major= MagickMaximumValue;
    else
      Major= sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /*
    If one or both of the scaling vectors is impossibly large (producing a
    very large raw F value), we may as well not bother doing any form of
    resampling since the resampled area is very large.  In this case some
    alternative means of pixel sampling, such as the average of the whole
    image, is needed to get a reasonable result.  Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /*
    Scale ellipse to match the filter's support (that is, multiply F by the
    square of the support).  Simpler to just multiply it by the support
    twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to the ellipse */
  resample_filter->Uwidth = sqrt(F/A);   /* half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A);   /* reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /*
    Check the absolute area of the parallelogram involved.
    NOTE(review): this limit needs more work, as it is too slow for larger
    images with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
        > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale the ellipse formula to directly index the filter lookup table. */
  {
    register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() sets the resampling filter lookup table based on a
%  specific filter.  Note that the filter is used as a radial filter, not as
%  a two-pass orthogonally aligned resampling filter.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterTypes filter,const double blur)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
%    o filter: the resize filter for the elliptical weighting LUT
%
%    o blur: filter blur factor (radial scaling) for the elliptical
%      weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  /* A 'point' filter forces plain interpolation instead of EWA sampling. */
  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return;  /* EWA turned off - nothing more to do */
  }

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    /* Could not build the resize filter: warn and fall back to point
       interpolation rather than failing the whole resample. */
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return;  /* EWA turned off - nothing more to do */
  }

  /*
    Get the practical working support for the filter, after any API call
    blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0;  /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function. */
  {
    register int
      Q;

    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range
       (the LUT is indexed by squared radius, hence the sqrt below). */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* Save the filter and the scaled ellipse bounds needed for the filter. */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle.  This assumes that any
    real scaling changes will always take place AFTER the filter method has
    been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only.  Basically it generates a
    Gaussian bell curve, with sigma = 0.5 if the support is 2.0.

    Create a normal Gaussian 2D filter weighted lookup table.  A normal EWA
    Gaussian lookup would use exp(Q*ALPHA) where
      Q = distance squared from 0.0 (center) to 1.0 (edge)
      ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to a support radius of 2.0,
    thus needs to be scaled by ALPHA*4/1024 and any blur factor squared.
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,
            "resample:verbose")) )
      {
        register int
          Q;

        double
          r_scale;

        /*
          Debug output of the filter weighting LUT.  Gnuplot the LUT data
          (the x scale index has been adjusted):
              plot [0:2][-.2:1] "lut.dat" with lines
          The filter values should be normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
          WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
          resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("#    plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range. */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /*
      Output the above once only for each image, and each setting
      (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod() sets the resample filter
%  interpolation method.
%
%  The format of the SetResampleFilterInterpolateMethod method is:
%
%      MagickBooleanType SetResampleFilterInterpolateMethod(
%        ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* Record the method; it takes effect on the next resample lookup. */
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
%  associated with the specified resample filter.
%
%  The format of the SetResampleFilterVirtualPixelMethod method is:
%
%      MagickBooleanType SetResampleFilterVirtualPixelMethod(
%        ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  /* Also propagate the method to the cache view so out-of-bounds pixel
     lookups during resampling honor the same policy. */
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
MinStep.h
#ifndef MIN_STEP_H
#define MIN_STEP_H

#define MIN_STEP 0x60

#include "step/TraversalStep.h"
#include "traversal/Traverser.h"

#include <functional>
#include <omp.h>

// Traversal step that reduces the current traverser set to the single
// minimum element, as determined by a user-supplied comparator.
class MinStep : public TraversalStep {
	private:
		// Comparator: returns negative if the first traverser orders
		// before (is "smaller than") the second, per strcmp convention.
		std::function<int(Traverser*, Traverser*)> compare;
	public:
		MinStep(std::function<int(Traverser*, Traverser*)> c)
		: TraversalStep(true, MAP, MIN_STEP) {
			compare = c;
		}

		// Return whichever of the two traversers compares smaller
		// (t2 wins ties, since only cmp < 0 selects t1).
		Traverser* min(Traverser* t1, Traverser* t2) {
			int cmp = compare(t1, t2);
			return cmp < 0 ? t1 : t2;
		}

		// Replace the traverser set with its single minimum element.
		// TODO this is naive; there is no guarantee there will be N/2 threads.
		virtual void apply(GraphTraversal* traversal, TraverserSet& traversers) {
			Traverser* min_value;
			size_t N = traversers.size();
			// The parallel tree-reduction below is disabled (0==1 is
			// never true); only the sequential scan in the else branch
			// runs.  NOTE(review): the disabled branch looks buggy as
			// written — see the inline notes — which is presumably why
			// it is switched off.
			if(0==1) {
				// Ping-pong buffers for a pairwise reduction tree.
				std::vector<Traverser*> values(traversers.begin(), traversers.end());
				std::vector<Traverser*> values2(values);
				omp_set_dynamic(0);
				omp_set_num_threads(N/2 + 1);
				size_t T;
				#pragma omp parallel
				{
					T = omp_get_num_threads();
				}
				if(T < N/2 + 1) {
					throw std::runtime_error("Not enough threads!");
				}
				int thread;
				size_t k, it, i, j;
				#pragma omp parallel private(thread, i, j, k, it)
				{
					it = 0;
					thread = omp_get_thread_num();
					// NOTE(review): condition looks inverted — `k <= 1`
					// means the loop body never executes for N > 1;
					// `k > 1` appears to be the intent.  TODO confirm.
					for(k = N; k <= 1; k /= 2) {
						if(thread < k / 2.0) {
							i = 2*thread;
							j = i + 1;
							if(j >= k) j = i; // odd count: pair element with itself
							// Alternate reduction direction each round.
							if(it % 2 == 0) {
								values[thread] = min(values2[i], values2[j]);
							} else {
								values2[thread] = min(values[i], values[j]);
							}
						}
						++it;
						#pragma omp barrier
					}
				}
				// NOTE(review): `it` was listed in private(...), so the
				// outer-scope `it` read here is unchanged by the parallel
				// region (i.e. still uninitialized) — verify before
				// re-enabling this branch.
				min_value = it % 2 == 0 ? values2[0] : values[0];
			}
			else {
				// Sequential reduction: linear scan keeping the running
				// minimum.  Assumes traversers is non-empty (front()).
				min_value = traversers.front();
				for(auto it = traversers.begin(); it != traversers.end(); ++it) {
					min_value = this->min(*it, min_value);
				}
			}
			// Collapse the set down to just the minimum traverser.
			traversers.clear();
			traversers.push_back(min_value);
		}
};

#endif
CNN_p.c
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/arrayobject.h"
#include <stdio.h>
#include <math.h>  /* BUGFIX: exp() is used below; it was only commented out, leaving an implicit declaration */
// #include <stdlib.h>
// #include <stdarg.h>
// #include <string.h>
// #include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* BUGFIX: omp_get_max_threads()/omp_get_thread_num() were called unconditionally
 * while <omp.h> is guarded; provide serial fallbacks so the file builds without OpenMP. */
static int omp_get_max_threads(void) { return 1; }
static int omp_get_thread_num(void) { return 0; }
#endif

/*
 * c = a * b (+ bias): a is (n x m), b is an (m x p) weight matrix followed by
 * one extra bias row (row index m), c is (n x p).  Returns 0 on success.
 * BUGFIX: was declared "static int *" while returning the integer 0.
 */
static int matrixtimes(double *a, double *b, double *c, int n, int m, int p){
    int i, j, k, tempid;
    for (i = 0; i < n; i++){
        for (j = 0; j < p; j++){
            tempid = i*p + j;
            c[tempid] = b[m*p + j];          /* start from the bias row */
            for (k = 0; k < m; k++){
                c[tempid] += a[i*m + k]*b[k*p + j];
            }
        }
    }
    return 0;
}

/*
 * Forward-propagate the network "times" iterations over the input rows.
 * kwargs: shape (layer sizes), input, output, trans (per-layer weight arrays), times.
 * Returns (None) on success, (None, message) on argument errors.
 * NOTE(review): PyArray_GETCONTIGUOUS returns new references that are never
 * Py_DECREF'ed on success or failure — TODO confirm and add refcount handling.
 */
static PyObject *fit_ANN_BP(PyObject *self, PyObject *args, PyObject *kwargs)
{
    PyArrayObject *shapedata,*inputdata,*outputdata,*transdata,**transp;
    int i,j,k,p,q,times=0,transl=0;
    static char *kwlist[] = {"shape", "input", "output", "trans", "times", NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OOOO|i", kwlist, &shapedata, &inputdata, &outputdata, &transdata, &times))
        return Py_BuildValue("Os", Py_None,"Couldn't parse variable from C function.");
    shapedata = PyArray_GETCONTIGUOUS(shapedata);
    inputdata = PyArray_GETCONTIGUOUS(inputdata);
    outputdata = PyArray_GETCONTIGUOUS(outputdata);
    transdata = PyArray_GETCONTIGUOUS(transdata);
    int *shape = (int *) PyArray_DATA(shapedata);
    double *input =(double *) PyArray_DATA(inputdata);
    double *output =(double *) PyArray_DATA(outputdata);
    transl=PyArray_DIM(transdata,0);
    double **trans = (double **) malloc(transl*sizeof(double *));
    if (!trans) return PyErr_NoMemory();
    transp=(PyArrayObject **)malloc(transl*(sizeof(PyArrayObject *)));
    if (!transp){ free(trans); return PyErr_NoMemory(); }
    /* Shape validation.  BUGFIX: these early returns leaked trans/transp. */
    if ((PyArray_NDIM(inputdata)!=2)||((PyArray_NDIM(outputdata)!=2))){
        free(trans); free(transp);
        return Py_BuildValue("Os", Py_None,"Input or Output data are not 2-D data.");
    }
    if (PyArray_DIMS(inputdata)[0]!=PyArray_DIMS(outputdata)[0]){
        free(trans); free(transp);
        return Py_BuildValue("Os", Py_None,"Input and Output dim[0] is not the same.");
    }
    if (PyArray_DIMS(inputdata)[1]!=shape[0]){
        free(trans); free(transp);
        return Py_BuildValue("Os", Py_None,"Input doesn't fit webshape.");
    }
    if (PyArray_DIMS(outputdata)[1]!=shape[PyArray_DIMS(shapedata)[0]-1]){
        free(trans); free(transp);
        return Py_BuildValue("Os", Py_None,"Output doesn't fit webshape.");
    }
    /* Collect per-layer weight buffers. */
    for (i=0;i<transl;i++){
        transp[i]=((PyArrayObject **) PyArray_DATA(transdata))[i];
        transp[i]=PyArray_GETCONTIGUOUS(transp[i]);
        trans[i] = (double *)PyArray_DATA(transp[i]);
    }
    int corenumber=omp_get_max_threads(),mythreadid=-1;
    printf("* Find %d threads avaiable.\n", corenumber);
    /* temp[thread][layer] holds one activation vector per layer per thread. */
    double ***temp=NULL;
    temp=(double ***)malloc(corenumber*sizeof(double **));
    if (!temp){ free(trans); free(transp); return PyErr_NoMemory(); }
    for (i=0;i<corenumber;i++){
        temp[i]=(double **)malloc(transl*sizeof(double *));
        if (!temp[i]){
            for (j=0;j<i;j++){
                for (k=0;k<transl;k++) free(temp[j][k]);
                free(temp[j]);
            }
            free(temp); free(trans); free(transp);
            return PyErr_NoMemory();
        }
        for (j=0;j<transl;j++){
            temp[i][j] = (double *)malloc((shape[j+1])*sizeof(double));
            if (!temp[i][j]){
                /* BUGFIX: the cleanup loop freed temp[i][j] (the failed, NULL
                 * pointer) j times instead of the previously allocated rows. */
                for (k=0;k<j;k++) free(temp[i][k]);
                free(temp[i]);
                for (p=0;p<i;p++){
                    for (q=0;q<transl;q++) free(temp[p][q]);
                    free(temp[p]);
                }
                free(temp); free(trans); free(transp);
                return PyErr_NoMemory();
            }
        }
    }
    // #pragma omp parallel for firstprivate(times,i,transl,input,inputdata,trans,shape,temp,j,mythreadid) schedule(dynamic,1)
    for (k=0;k<times;k++){
        mythreadid=omp_get_thread_num();
        double **nowarray=temp[mythreadid];
        for (i=0;i<transl;i++){
            /* Layer 0 reads the (k mod rows)-th input row; later layers chain. */
            if (i==0) matrixtimes(&input[(k%(PyArray_DIM(inputdata,0)))*PyArray_DIM(inputdata,1)],trans[0],nowarray[0],1,shape[0],shape[1]);
            else{
                matrixtimes(nowarray[i-1],trans[i],nowarray[i],1,shape[i],shape[i+1]);
            }
            /* NOTE(review): 1/(1+exp(+x)) — a standard sigmoid is 1/(1+exp(-x));
             * confirm the sign convention against the Python-side training code. */
            for (j=0;j<shape[i+1];j++){
                nowarray[i][j]=1./(1.+exp(nowarray[i][j]));
            }
        }
        // #pragma omp barrier
        // printf("%d\n", mythreadid);
        // if (mythreadid==1){ }
    }
    /* Copy the last layer of thread 0 into the caller's output buffer. */
    for (i=0;i<shape[transl];i++){
        output[i]=temp[0][transl-1][i];
    }
    free(trans); free(transp);
    for (i=0;i<corenumber;i++){
        for (j=0;j<transl;j++) free(temp[i][j]);
        free(temp[i]);
    }
    free(temp);
    return Py_BuildValue("O", Py_None);
}

static PyMethodDef CNN_p_methods[] = {
    {"fit_ANN_BP", (PyCFunction)fit_ANN_BP, METH_VARARGS | METH_KEYWORDS, "Perform BP calculation for ANN\n"},
    {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef CNN_pmodule = {
    PyModuleDef_HEAD_INIT,
    "CNN_p",
    "Neural Networks tools with parallel.",
    -1,
    CNN_p_methods,
};

PyMODINIT_FUNC PyInit_CNN_p(void)
{
    import_array();
    return PyModule_Create(&CNN_pmodule);
}
#else
PyMODINIT_FUNC initCNN_p(void)
{
    Py_InitModule3("CNN_p", CNN_p_methods, "Neural Networks tools with parallel.");
    import_array();
}
#endif
adjointadvection_avx.h
//*****************************************************************************
//  Title     : src/equation_avx/adjointadvection_avx.h
//  Author    : Tanabe Yuta
//  Date      : 2021/02/13
//  Copyright : (C)2021 TanabeYuta
//*****************************************************************************

#pragma once
#include <immintrin.h>  // compile option for g++(MinGW) : -mavx

namespace PANSLBM2 {
    namespace AAD {
        // Scalar (non-AVX) overload declarations; the definitions live elsewhere
        // and are used below for the tail elements that do not fill a SIMD pack.
        template<class T, template<class>class Q>void Macro(T &, T &, T &, const T *, const T *, int);                              //  Update macroscopic values of AAD for 2D
        template<class T, template<class>class Q>void Macro(T &, T &, T &, T &, const T *, const T *, int);                         //  Update macroscopic values of AAD for 3D
        template<class T, template<class>class Q>void Equilibrium(T *, T, T, T, T, T);                                              //  Equilibrium of AAD for 2D
        template<class T, template<class>class Q>void Equilibrium(T *, T, T, T, T, T, T, T);                                        //  Equilibrium of AAD for 3D
        template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T, T, T, T, T, T *, T *, T, int);           //  External force with Brinkman model and advection of AAD for 2D
        template<class T, template<class>class P>void ExternalForceBrinkman(T, T, T, T, T, T, T, T, T, T, T, T, T *, T *, T, int);  //  External force with Brinkman model and advection of AAD for 3D
        template<class T, template<class>class Q>void ExternalForceHeatExchange(T, T *, T *, T, int);                               //  External force with heat exchange of AAD for 2D/3D
        template<class T, template<class>class Q>void ExternalForceNaturalConvection(T, T, T, T, T *, T *, int);                    //  External force with natural convection of AAD for 2D
        template<class T, template<class>class Q>void ExternalForceNaturalConvection(T, T, T, T, T, T, T *, T *, int);              //  External force with natural convection of AAD for 3D

        // Function of updating macroscopic values of AAD for 2D (AVX pack of 4 cells):
        // item = sum_c ei[c]*g[c], iq{x,y} = sum_{c>=1} ei[c]*g[c]*c{x,y}[c].
        template<class Q>
        void Macro(__m256d &__item, __m256d &__iqx, __m256d &__iqy, const __m256d *__g) {
            __item = _mm256_mul_pd(Q::__ei[0], __g[0]);
            __iqx = _mm256_setzero_pd();
            __iqy = _mm256_setzero_pd();
            for (int c = 1; c < Q::nc; ++c) {
                __m256d __gei = _mm256_mul_pd(Q::__ei[c], __g[c]);
                __item = _mm256_add_pd(__item, __gei);
                __iqx = _mm256_add_pd(__iqx, _mm256_mul_pd(__gei, Q::__cx[c]));
                __iqy = _mm256_add_pd(__iqy, _mm256_mul_pd(__gei, Q::__cy[c]));
            }
        }

        // Function of updating macroscopic values of AAD for 3D (adds the z moment iqz).
        template<class Q>
        void Macro(__m256d &__item, __m256d &__iqx, __m256d &__iqy, __m256d &__iqz, const __m256d *__g) {
            __item = _mm256_mul_pd(Q::__ei[0], __g[0]);
            __iqx = _mm256_setzero_pd();
            __iqy = _mm256_setzero_pd();
            __iqz = _mm256_setzero_pd();
            for (int c = 1; c < Q::nc; ++c) {
                __m256d __gei = _mm256_mul_pd(Q::__ei[c], __g[c]);
                __item = _mm256_add_pd(__item, __gei);
                __iqx = _mm256_add_pd(__iqx, _mm256_mul_pd(__gei, Q::__cx[c]));
                __iqy = _mm256_add_pd(__iqy, _mm256_mul_pd(__gei, Q::__cy[c]));
                __iqz = _mm256_add_pd(__iqz, _mm256_mul_pd(__gei, Q::__cz[c]));
            }
        }

        // Function of getting equilibrium of AAD for 2D:
        // geq[c] = item + 3*(iqx*ux + iqy*uy) for every direction c.
        // NOTE(review): the same value is written to all nc directions —
        // confirm the adjoint equilibrium really is direction-independent here.
        template<class Q>
        void Equilibrium(__m256d *__geq, const __m256d &__item, const __m256d &__iqx, const __m256d &__iqy, const __m256d &__ux, const __m256d &__uy) {
            __m256d __coef = _mm256_add_pd(__item, _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_add_pd(_mm256_mul_pd(__iqx, __ux), _mm256_mul_pd(__iqy, __uy))));
            for (int c = 0; c < Q::nc; ++c) {
                __geq[c] = __coef;
            }
        }

        // Function of getting equilibrium of AAD for 3D (adds the iqz*uz term).
        template<class Q>
        void Equilibrium(__m256d *__geq, const __m256d &__item, const __m256d &__iqx, const __m256d &__iqy, const __m256d &__iqz, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz) {
            __m256d __coef = _mm256_add_pd(__item, _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__iqx, __ux), _mm256_mul_pd(__iqy, __uy)), _mm256_mul_pd(__iqz, __uz))));
            for (int c = 0; c < Q::nc; ++c) {
                __geq[c] = __coef;
            }
        }

        // Function of applying external force with Brinkman model and advection of AAD for 2D
        // (continued) applying external force with Brinkman model and advection of AAD for 2D:
        // adds 3/(rho+alpha) * sum of (tem*iq*omegag - alpha*im) projected on (c - u).
        template<class P>
        void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__imx, const __m256d &__imy, const __m256d &__tem, const __m256d &__iqx, const __m256d &__iqy, const __m256d &__omegag, __m256d *__f, const __m256d &__alpha) {
            __m256d __coef = _mm256_div_pd(_mm256_set1_pd(3.0), _mm256_add_pd(__rho, __alpha));
            __m256d __coefx = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(__tem, __iqx), __omegag), _mm256_mul_pd(__alpha, __imx));
            __m256d __coefy = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(__tem, __iqy), __omegag), _mm256_mul_pd(__alpha, __imy));
            // Rest direction (c = 0): velocity projection is (0 - u), hence the subtraction.
            __f[0] = _mm256_sub_pd(__f[0], _mm256_mul_pd(__coef, _mm256_add_pd(_mm256_mul_pd(__coefx, __ux), _mm256_mul_pd(__coefy, __uy))));
            for (int c = 1; c < P::nc; ++c) {
                __f[c] = _mm256_add_pd(__f[c], _mm256_mul_pd(__coef, _mm256_add_pd(_mm256_mul_pd(__coefx, _mm256_sub_pd(P::__cx[c], __ux)), _mm256_mul_pd(__coefy, _mm256_sub_pd(P::__cy[c], __uy)))));
            }
        }

        // Function of applying external force with Brinkman model and advection of AAD for 3D.
        template<class P>
        void ExternalForceBrinkman(const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__uz, const __m256d &__imx, const __m256d &__imy, const __m256d &__imz, const __m256d &__tem, const __m256d &__iqx, const __m256d &__iqy, const __m256d &__iqz, const __m256d &__omegag, __m256d *__f, const __m256d &__alpha) {
            __m256d __coef = _mm256_div_pd(_mm256_set1_pd(3.0), _mm256_add_pd(__rho, __alpha));
            __m256d __coefx = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(__tem, __iqx), __omegag), _mm256_mul_pd(__alpha, __imx));
            __m256d __coefy = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(__tem, __iqy), __omegag), _mm256_mul_pd(__alpha, __imy));
            __m256d __coefz = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(__tem, __iqz), __omegag), _mm256_mul_pd(__alpha, __imz));
            __f[0] = _mm256_sub_pd(__f[0], _mm256_mul_pd(__coef, _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__coefx, __ux), _mm256_mul_pd(__coefy, __uy)), _mm256_mul_pd(__coefz, __uz))));
            for (int c = 1; c < P::nc; ++c) {
                __f[c] = _mm256_add_pd(__f[c], _mm256_mul_pd(__coef, _mm256_add_pd(
                    _mm256_add_pd(
                        _mm256_mul_pd(__coefx, _mm256_sub_pd(P::__cx[c], __ux)),
                        _mm256_mul_pd(__coefy, _mm256_sub_pd(P::__cy[c], __uy))
                    ),
                    _mm256_mul_pd(__coefz, _mm256_sub_pd(P::__cz[c], __uz))
                )));
            }
        }

        // Function of applying external force with heat exchange of AAD for 2D/3D:
        // subtracts beta*(1 + item)/(1 + beta) from every direction.
        template<class Q>
        void ExternalForceHeatExchange(const __m256d &__item, __m256d *__g, const __m256d &__beta) {
            __m256d __coef = _mm256_mul_pd(__beta, _mm256_div_pd(_mm256_add_pd(_mm256_set1_pd(1.0), __item), _mm256_add_pd(_mm256_set1_pd(1.0), __beta)));
            for (int c = 0; c < Q::nc; ++c) {
                __g[c] = _mm256_sub_pd(__g[c], __coef);
            }
        }

        // Function of applying external force with natural convection of AAD for 2D:
        // adds 3*(imx*gx + imy*gy) to every direction.
        template<class Q>
        void ExternalForceNaturalConvection(const __m256d &__imx, const __m256d &__imy, const __m256d &__gx, const __m256d &__gy, __m256d *__g) {
            __m256d __coef = _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_add_pd(_mm256_mul_pd(__imx, __gx), _mm256_mul_pd(__imy, __gy)));
            for (int c = 0; c < Q::nc; ++c) {
                __g[c] = _mm256_add_pd(__g[c], __coef);
            }
        }

        // Function of applying external force with natural convection of AAD for 3D.
        template<class Q>
        void ExternalForceNaturalConvection(const __m256d &__imx, const __m256d &__imy, const __m256d &__imz, const __m256d &__gx, const __m256d &__gy, const __m256d &__gz, __m256d *__g) {
            __m256d __coef = _mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(__imx, __gx), _mm256_mul_pd(__imy, __gy)), _mm256_mul_pd(__imz, __gz)));
            for (int c = 0; c < Q::nc; ++c) {
                __g[c] = _mm256_add_pd(__g[c], __coef);
            }
        }

        // Function of Update macro, External force(Brinkman, Heat exchange) and Collide of AAD for 2D.
        // SIMD main loop over whole packs, then a scalar tail loop for the remainder.
        template<template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideHeatExchange(
            P<double>& _p, const double *_rho, const double *_ux, const double *_uy, double *_ip, double *_iux, double *_iuy, double *_imx, double *_imy, const double *_alpha, double _viscosity,
            Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, const double *_beta, double _diffusivity,
            bool _issave = false
        ) {
            const int ne = _p.nxyz/P<double>::packsize;
            double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc];
            __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc];
            double omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<double>::nc];
            __m256d __omegag = _mm256_set1_pd(omegag), __iomegag = _mm256_set1_pd(iomegag), __geq[Q<double>::nc];
#pragma omp parallel for private(__feq, __geq)
            for (int pidx = 0; pidx < ne; ++pidx) {
                int idx = pidx*P<double>::packsize;

                //  Pack f0, f, g0 and g
                __m256d __f[P<double>::nc], __g[Q<double>::nc];
                _p.LoadF(idx, __f);
                _q.LoadF(idx, __g);

                //  Update macro
                __m256d __ip, __iux, __iuy, __imx, __imy;
                __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __tem = _mm256_loadu_pd(&_tem[idx]);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);
                __m256d __item, __iqx, __iqy;
                Macro<Q<double> >(__item, __iqx, __iqy, __g);

                //  External force with Brinkman model (macro recomputed afterwards)
                __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
                ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __imx, __imy, __tem, __iqx, __iqy, __omegag, __f, __alpha);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);
                __m256d __beta = _mm256_loadu_pd(&_beta[idx]);
                ExternalForceHeatExchange<Q<double> >(__item, __g, __beta);
                Macro<Q<double> >(__item, __iqx, __iqy, __g);

                //  Save macro if need
                if (_issave) {
                    _mm256_storeu_pd(&_ip[idx], __ip);
                    _mm256_storeu_pd(&_iux[idx], __iux);
                    _mm256_storeu_pd(&_iuy[idx], __iuy);
                    _mm256_storeu_pd(&_imx[idx], __imx);
                    _mm256_storeu_pd(&_imy[idx], __imy);
                    _mm256_storeu_pd(&_item[idx], __item);
                    _mm256_storeu_pd(&_iqx[idx], __iqx);
                    _mm256_storeu_pd(&_iqy[idx], __iqy);
                }

                //  Collide (BGK relaxation toward the adjoint equilibria)
                ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __ip, __iux, __iuy);
                for (int c = 0; c < P<double>::nc; ++c) {
                    __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c]));
                }
                _p.StoreF(idx, __f);
                Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __ux, __uy);
                for (int c = 0; c < Q<double>::nc; ++c) {
                    __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c]));
                }
                _q.StoreF(idx, __g);
            }
            //  Scalar tail loop: same sequence via the scalar overloads.
            for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
                //  Update macro
                double ip, iux, iuy, imx, imy;
                ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                double item, iqx, iqy;
                Macro<double, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                ExternalForceHeatExchange<double, Q>(item, _q.f0, _q.f, _beta[idx], idx);
                Macro<double, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                }

                //  Collide
                ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<double>::nc; ++c) {
                    int idxf = P<double>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<double, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<double>::nc; ++c) {
                    int idxf = Q<double>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        // Function of Update macro, External force(Brinkman, Heat exchange) and Collide of AAD for 3D
        // (continued) Update macro, External force (Brinkman, Heat exchange) and Collide of AAD for 3D.
        template<template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideHeatExchange(
            P<double>& _p, const double *_rho, const double *_ux, const double *_uy, const double *_uz, double *_ip, double *_iux, double *_iuy, double *_iuz, double *_imx, double *_imy, double *_imz, const double *_alpha, double _viscosity,
            Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, double *_iqz, const double *_beta, double _diffusivity,
            bool _issave = false
        ) {
            const int ne = _p.nxyz/P<double>::packsize;
            double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc];
            __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc];
            double omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<double>::nc];
            __m256d __omegag = _mm256_set1_pd(omegag), __iomegag = _mm256_set1_pd(iomegag), __geq[Q<double>::nc];
#pragma omp parallel for private(__feq, __geq)
            for (int pidx = 0; pidx < ne; ++pidx) {
                int idx = pidx*P<double>::packsize;

                //  Pack f0, f, g0 and g
                __m256d __f[P<double>::nc], __g[Q<double>::nc];
                _p.LoadF(idx, __f);
                _q.LoadF(idx, __g);

                //  Update macro
                __m256d __ip, __iux, __iuy, __iuz, __imx, __imy, __imz;
                __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __uz = _mm256_loadu_pd(&_uz[idx]), __tem = _mm256_loadu_pd(&_tem[idx]);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f);
                __m256d __item, __iqx, __iqy, __iqz;
                Macro<Q<double> >(__item, __iqx, __iqy, __iqz, __g);

                //  External force with Brinkman model (macro recomputed afterwards)
                __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
                ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __uz, __imx, __imy, __imz, __tem, __iqx, __iqy, __iqz, __omegag, __f, __alpha);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f);
                __m256d __beta = _mm256_loadu_pd(&_beta[idx]);
                ExternalForceHeatExchange<Q<double> >(__item, __g, __beta);
                Macro<Q<double> >(__item, __iqx, __iqy, __iqz, __g);

                //  Save macro if need
                if (_issave) {
                    _mm256_storeu_pd(&_ip[idx], __ip);
                    _mm256_storeu_pd(&_iux[idx], __iux);
                    _mm256_storeu_pd(&_iuy[idx], __iuy);
                    _mm256_storeu_pd(&_iuz[idx], __iuz);
                    _mm256_storeu_pd(&_imx[idx], __imx);
                    _mm256_storeu_pd(&_imy[idx], __imy);
                    _mm256_storeu_pd(&_imz[idx], __imz);
                    _mm256_storeu_pd(&_item[idx], __item);
                    _mm256_storeu_pd(&_iqx[idx], __iqx);
                    _mm256_storeu_pd(&_iqy[idx], __iqy);
                    _mm256_storeu_pd(&_iqz[idx], __iqz);
                }

                //  Collide
                ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __uz, __ip, __iux, __iuy, __iuz);
                for (int c = 0; c < P<double>::nc; ++c) {
                    __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c]));
                }
                _p.StoreF(idx, __f);
                Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __iqz, __ux, __uy, __uz);
                for (int c = 0; c < Q<double>::nc; ++c) {
                    __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c]));
                }
                _q.StoreF(idx, __g);
            }
            //  Scalar tail loop for the remainder elements.
            for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
                //  Update macro
                double ip, iux, iuy, iuz, imx, imy, imz;
                ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                double item, iqx, iqy, iqz;
                Macro<double, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                ExternalForceHeatExchange<double, Q>(item, _q.f0, _q.f, _beta[idx], idx);
                Macro<double, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _iuz[idx] = iuz;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _imz[idx] = imz;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    _iqz[idx] = iqz;
                }

                //  Collide
                ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<double>::nc; ++c) {
                    int idxf = P<double>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<double, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<double>::nc; ++c) {
                    int idxf = Q<double>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        // Function of Update macro and Collide of AAD for 2D.
        // Unlike the HeatExchange variant, diffusivity is per-cell (array) and the
        // raw g distribution may optionally be exported through _g.
        template<template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideForceConvection(
            P<double>& _p, const double *_rho, const double *_ux, const double *_uy, double *_ip, double *_iux, double *_iuy, double *_imx, double *_imy, const double *_alpha, double _viscosity,
            Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, const double *_diffusivity,
            bool _issave = false, double *_g = nullptr
        ) {
            const int ne = _p.nxyz/P<double>::packsize;
            double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc], geq[Q<double>::nc];
            __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc], __geq[Q<double>::nc];
#pragma omp parallel for private(__feq, __geq)
            for (int pidx = 0; pidx < ne; ++pidx) {
                int idx = pidx*P<double>::packsize;
                //  Per-pack relaxation rate from the per-cell diffusivity.
                __m256d __diffusivity = _mm256_loadu_pd(&_diffusivity[idx]);
                __m256d __omegag = _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __diffusivity), _mm256_set1_pd(0.5)));
                __m256d __iomegag = _mm256_sub_pd(_mm256_set1_pd(1.0), __omegag);

                //  Pack f0, f, g0 and g
                __m256d __f[P<double>::nc], __g[Q<double>::nc];
                _p.LoadF(idx, __f);
                _q.LoadF(idx, __g);

                //  Update macro
                __m256d __ip, __iux, __iuy, __imx, __imy;
                __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __tem = _mm256_loadu_pd(&_tem[idx]);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);
                __m256d __item, __iqx, __iqy;
                Macro<Q<double> >(__item, __iqx, __iqy, __g);

                //  External force with Brinkman model
                __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]);
                ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __imx, __imy, __tem, __iqx, __iqy, __omegag, __f, __alpha);
                ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f);

                //  Save macro if need
                if (_issave) {
                    _mm256_storeu_pd(&_ip[idx], __ip);
                    _mm256_storeu_pd(&_iux[idx], __iux);
                    _mm256_storeu_pd(&_iuy[idx], __iuy);
                    _mm256_storeu_pd(&_imx[idx], __imx);
                    _mm256_storeu_pd(&_imy[idx], __imy);
                    _mm256_storeu_pd(&_item[idx], __item);
                    _mm256_storeu_pd(&_iqx[idx], __iqx);
                    _mm256_storeu_pd(&_iqy[idx], __iqy);
                    if (_g) {
                        //  Export the pre-collision g distribution, pack-interleaved.
                        int offsetf = Q<double>::nc*idx;
                        for (int c = 0; c < Q<double>::nc; ++c) {
                            _mm256_storeu_pd(&_g[offsetf + Q<double>::packsize*c], __g[c]);
                        }
                    }
                }

                //  Collide
                ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __ip, __iux, __iuy);
                for (int c = 0; c < P<double>::nc; ++c) {
                    __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c]));
                }
                _p.StoreF(idx, __f);
                Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __ux, __uy);
                for (int c = 0; c < Q<double>::nc; ++c) {
                    __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c]));
                }
                _q.StoreF(idx, __g);
            }
            //  Scalar tail loop for the remainder elements.
            for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
                double omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;

                //  Update macro
                double ip, iux, iuy, imx, imy;
                ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                double item, iqx, iqy;
                Macro<double, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0,
_p.f, _alpha[idx], idx); ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx); // Save macro if need if (_issave) { _ip[idx] = ip; _iux[idx] = iux; _iuy[idx] = iuy; _imx[idx] = imx; _imy[idx] = imy; _item[idx] = item; _iqx[idx] = iqx; _iqy[idx] = iqy; if (_g) { int offsetf = Q<double>::nc*idx; _g[offsetf] = _q.f0[idx]; for (int c = 1; c < Q<double>::nc; ++c) { _g[offsetf + c] = _q.f[Q<double>::IndexF(idx, c)]; } } } // Collide ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy); _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0]; for (int c = 1; c < P<double>::nc; ++c) { int idxf = P<double>::IndexF(idx, c); _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c]; } Equilibrium<double, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]); _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0]; for (int c = 1; c < Q<double>::nc; ++c) { int idxf = Q<double>::IndexF(idx, c); _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c]; } } } // Function of Update macro and Collide of AAD for 3D template<template<class>class P, template<class>class Q> void MacroBrinkmanCollideForceConvection( P<double>& _p, const double *_rho, const double *_ux, const double *_uy, const double *_uz, double *_ip, double *_iux, double *_iuy, double *_iuz, double *_imx, double *_imy, double *_imz, const double *_alpha, double _viscosity, Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, double *_iqz, const double *_diffusivity, bool _issave = false, double *_g = nullptr ) { const int ne = _p.nxyz/P<double>::packsize; double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc], geq[Q<double>::nc]; __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc], __geq[Q<double>::nc]; #pragma omp parallel for private(__feq, __geq) for (int pidx = 0; pidx < ne; ++pidx) { int idx = pidx*P<double>::packsize; __m256d __diffusivity = _mm256_loadu_pd(&_diffusivity[idx]); __m256d __omegag = 
_mm256_div_pd(_mm256_set1_pd(1.0), _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __diffusivity), _mm256_set1_pd(0.5))); __m256d __iomegag = _mm256_sub_pd(_mm256_set1_pd(1.0), __omegag); // Pack f0, f, g0 and g __m256d __f[P<double>::nc], __g[Q<double>::nc]; _p.LoadF(idx, __f); _q.LoadF(idx, __g); // Update macro __m256d __ip, __iux, __iuy, __iuz, __imx, __imy, __imz; __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __uz = _mm256_loadu_pd(&_uz[idx]), __tem = _mm256_loadu_pd(&_tem[idx]); ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f); __m256d __item, __iqx, __iqy, __iqz; Macro<Q<double> >(__item, __iqx, __iqy, __iqz, __g); // External force with Brinkman model __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]); ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __uz, __imx, __imy, __imz, __tem, __iqx, __iqy, __iqz, __omegag, __f, __alpha); ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f); // Save macro if need if (_issave) { _mm256_storeu_pd(&_ip[idx], __ip); _mm256_storeu_pd(&_iux[idx], __iux); _mm256_storeu_pd(&_iuy[idx], __iuy); _mm256_storeu_pd(&_iuz[idx], __iuz); _mm256_storeu_pd(&_imx[idx], __imx); _mm256_storeu_pd(&_imy[idx], __imy); _mm256_storeu_pd(&_imz[idx], __imz); _mm256_storeu_pd(&_item[idx], __item); _mm256_storeu_pd(&_iqx[idx], __iqx); _mm256_storeu_pd(&_iqy[idx], __iqy); _mm256_storeu_pd(&_iqz[idx], __iqz); if (_g) { int offsetf = Q<double>::nc*idx; for (int c = 0; c < Q<double>::nc; ++c) { _mm256_storeu_pd(&_g[offsetf + Q<double>::packsize*c], __g[c]); } } } // Collide ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __uz, __ip, __iux, __iuy, __iuz); for (int c = 0; c < P<double>::nc; ++c) { __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c])); } _p.StoreF(idx, __f); Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __iqz, __ux, __uy, 
__uz); for (int c = 0; c < Q<double>::nc; ++c) { __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c])); } _q.StoreF(idx, __g); } for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) { double omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag; // Update macro double ip, iux, iuy, iuz, imx, imy, imz; ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx); double item, iqx, iqy, iqz; Macro<double, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx); // External force with Brinkman model ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx); ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx); // Save macro if need if (_issave) { _ip[idx] = ip; _iux[idx] = iux; _iuy[idx] = iuy; _imx[idx] = imx; _imy[idx] = imy; _imz[idx] = imz; _item[idx] = item; _iqx[idx] = iqx; _iqy[idx] = iqy; _iqz[idx] = iqz; if (_g) { int offsetf = Q<double>::nc*idx; _g[offsetf] = _q.f0[idx]; for (int c = 1; c < Q<double>::nc; ++c) { _g[offsetf + c] = _q.f[Q<double>::IndexF(idx, c)]; } } } // Collide ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz); _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0]; for (int c = 1; c < P<double>::nc; ++c) { int idxf = P<double>::IndexF(idx, c); _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c]; } Equilibrium<double, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]); _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0]; for (int c = 1; c < Q<double>::nc; ++c) { int idxf = Q<double>::IndexF(idx, c); _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c]; } } } // Function of Update macro and Collide of AAD for 2D template<template<class>class P, template<class>class Q> void MacroBrinkmanCollideNaturalConvection( P<double>& _p, const double *_rho, const double *_ux, const 
double *_uy, double *_ip, double *_iux, double *_iuy, double *_imx, double *_imy, const double *_alpha, double _viscosity, Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, const double *_diffusivity, double _gx, double _gy, bool _issave = false, double *_g = nullptr ) { const int ne = _p.nxyz/P<double>::packsize; double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc], geq[Q<double>::nc]; __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc], __geq[Q<double>::nc]; __m256d __gx = _mm256_set1_pd(_gx), __gy = _mm256_set1_pd(_gy); #pragma omp parallel for private(__feq, __geq) for (int pidx = 0; pidx < ne; ++pidx) { int idx = pidx*P<double>::packsize; __m256d __diffusivity = _mm256_loadu_pd(&_diffusivity[idx]); __m256d __omegag = _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __diffusivity), _mm256_set1_pd(0.5))); __m256d __iomegag = _mm256_sub_pd(_mm256_set1_pd(1.0), __omegag); // Pack f0, f, g0 and g __m256d __f[P<double>::nc], __g[Q<double>::nc]; _p.LoadF(idx, __f); _q.LoadF(idx, __g); // Update macro __m256d __ip, __iux, __iuy, __imx, __imy; __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __tem = _mm256_loadu_pd(&_tem[idx]); ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f); __m256d __item, __iqx, __iqy; Macro<Q<double> >(__item, __iqx, __iqy, __g); // External force with Brinkman model __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]); ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __imx, __imy, __tem, __iqx, __iqy, __omegag, __f, __alpha); ANS::Macro<P<double> >(__ip, __iux, __iuy, __imx, __imy, __rho, __ux, __uy, __f); ExternalForceNaturalConvection<Q<double> >(__imx, __imy, __gx, __gy, __g); Macro<Q<double> >(__item, __iqx, __iqy, __g); // Save macro if need if (_issave) { _mm256_storeu_pd(&_ip[idx], __ip); 
_mm256_storeu_pd(&_iux[idx], __iux); _mm256_storeu_pd(&_iuy[idx], __iuy); _mm256_storeu_pd(&_imx[idx], __imx); _mm256_storeu_pd(&_imy[idx], __imy); _mm256_storeu_pd(&_item[idx], __item); _mm256_storeu_pd(&_iqx[idx], __iqx); _mm256_storeu_pd(&_iqy[idx], __iqy); if (_g) { int offsetf = Q<double>::nc*idx; for (int c = 0; c < Q<double>::nc; ++c) { _mm256_storeu_pd(&_g[offsetf + Q<double>::packsize*c], __g[c]); } } } // Collide ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __ip, __iux, __iuy); for (int c = 0; c < P<double>::nc; ++c) { __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c])); } _p.StoreF(idx, __f); Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __ux, __uy); for (int c = 0; c < Q<double>::nc; ++c) { __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c])); } _q.StoreF(idx, __g); } for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) { double omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag; // Update macro double ip, iux, iuy, imx, imy; ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx); double item, iqx, iqy; Macro<double, Q>(item, iqx, iqy, _q.f0, _q.f, idx); // External force with Brinkman model ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0, _p.f, _alpha[idx], idx); ANS::Macro<double, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx); ExternalForceNaturalConvection<double, Q>(imx, imy, _gx, _gy, _q.f0, _q.f, idx); Macro<double, Q>(item, iqx, iqy, _q.f0, _q.f, idx); // Save macro if need if (_issave) { _ip[idx] = ip; _iux[idx] = iux; _iuy[idx] = iuy; _imx[idx] = imx; _imy[idx] = imy; _item[idx] = item; _iqx[idx] = iqx; _iqy[idx] = iqy; if (_g) { int offsetf = Q<double>::nc*idx; _g[offsetf] = _q.f0[idx]; for (int c = 1; c < Q<double>::nc; ++c) { _g[offsetf + c] = _q.f[Q<double>::IndexF(idx, c)]; } } } // Collide 
ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy); _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0]; for (int c = 1; c < P<double>::nc; ++c) { int idxf = P<double>::IndexF(idx, c); _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c]; } Equilibrium<double, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]); _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0]; for (int c = 1; c < Q<double>::nc; ++c) { int idxf = Q<double>::IndexF(idx, c); _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c]; } } } // Function of Update macro and Collide of AAD for 3D template<template<class>class P, template<class>class Q> void MacroBrinkmanCollideNaturalConvection( P<double>& _p, const double *_rho, const double *_ux, const double *_uy, const double *_uz, double *_ip, double *_iux, double *_iuy, double *_iuz, double *_imx, double *_imy, double *_imz, const double *_alpha, double _viscosity, Q<double>& _q, const double *_tem, double *_item, double *_iqx, double *_iqy, double *_iqz, const double *_diffusivity, double _gx, double _gy, double _gz, bool _issave = false, double *_g = nullptr ) { const int ne = _p.nxyz/P<double>::packsize; double omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<double>::nc], geq[Q<double>::nc]; __m256d __omegaf = _mm256_set1_pd(omegaf), __iomegaf = _mm256_set1_pd(iomegaf), __feq[P<double>::nc], __geq[Q<double>::nc]; __m256d __gx = _mm256_set1_pd(_gx), __gy = _mm256_set1_pd(_gy), __gz = _mm256_set1_pd(_gz); #pragma omp parallel for private(__feq, __geq) for (int pidx = 0; pidx < ne; ++pidx) { int idx = pidx*P<double>::packsize; __m256d __diffusivity = _mm256_loadu_pd(&_diffusivity[idx]); __m256d __omegag = _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_add_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), __diffusivity), _mm256_set1_pd(0.5))); __m256d __iomegag = _mm256_sub_pd(_mm256_set1_pd(1.0), __omegag); // Pack f0, f, g0 and g __m256d __f[P<double>::nc], __g[Q<double>::nc]; _p.LoadF(idx, __f); _q.LoadF(idx, __g); // Update macro __m256d __ip, __iux, 
__iuy, __iuz, __imx, __imy, __imz; __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __uz = _mm256_loadu_pd(&_uz[idx]), __tem = _mm256_loadu_pd(&_tem[idx]); ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f); __m256d __item, __iqx, __iqy, __iqz; Macro<Q<double> >(__item, __iqx, __iqy, __iqz, __g); // External force with Brinkman model __m256d __alpha = _mm256_loadu_pd(&_alpha[idx]); ExternalForceBrinkman<P<double> >(__rho, __ux, __uy, __uz, __imx, __imy, __imz, __tem, __iqx, __iqy, __iqz, __omegag, __f, __alpha); ANS::Macro<P<double> >(__ip, __iux, __iuy, __iuz, __imx, __imy, __imz, __rho, __ux, __uy, __uz, __f); ExternalForceNaturalConvection<Q<double> >(__imx, __imy, __imz, __gx, __gy, __gz, __g); Macro<Q<double> >(__item, __iqx, __iqy, __iqz, __g); // Save macro if need if (_issave) { _mm256_storeu_pd(&_ip[idx], __ip); _mm256_storeu_pd(&_iux[idx], __iux); _mm256_storeu_pd(&_iuy[idx], __iuy); _mm256_storeu_pd(&_iuz[idx], __iuz); _mm256_storeu_pd(&_imx[idx], __imx); _mm256_storeu_pd(&_imy[idx], __imy); _mm256_storeu_pd(&_imz[idx], __imz); _mm256_storeu_pd(&_item[idx], __item); _mm256_storeu_pd(&_iqx[idx], __iqx); _mm256_storeu_pd(&_iqy[idx], __iqy); _mm256_storeu_pd(&_iqz[idx], __iqz); if (_g) { int offsetf = Q<double>::nc*idx; for (int c = 0; c < Q<double>::nc; ++c) { _mm256_storeu_pd(&_g[offsetf + Q<double>::packsize*c], __g[c]); } } } // Collide ANS::Equilibrium<P<double> >(__feq, __ux, __uy, __uz, __ip, __iux, __iuy, __iuz); for (int c = 0; c < P<double>::nc; ++c) { __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomegaf, __f[c]), _mm256_mul_pd(__omegaf, __feq[c])); } _p.StoreF(idx, __f); Equilibrium<Q<double> >(__geq, __item, __iqx, __iqy, __iqz, __ux, __uy, __uz); for (int c = 0; c < Q<double>::nc; ++c) { __g[c] = _mm256_add_pd(_mm256_mul_pd(__iomegag, __g[c]), _mm256_mul_pd(__omegag, __geq[c])); } _q.StoreF(idx, __g); } for (int idx = 
ne*P<double>::packsize; idx < _p.nxyz; ++idx) { double omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag; // Update macro double ip, iux, iuy, iuz, imx, imy, imz; ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx); double item, iqx, iqy, iqz; Macro<double, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx); // External force with Brinkman model ExternalForceBrinkman<double, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx); ANS::Macro<double, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx); ExternalForceNaturalConvection<double, Q>(imx, imy, imz, _gx, _gy, _gz, _q.f0, _q.f, idx); Macro<double, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx); // Save macro if need if (_issave) { _ip[idx] = ip; _iux[idx] = iux; _iuy[idx] = iuy; _iuz[idx] = iuz; _imx[idx] = imx; _imy[idx] = imy; _imz[idx] = imz; _item[idx] = item; _iqx[idx] = iqx; _iqy[idx] = iqy; _iqz[idx] = iqz; if (_g) { int offsetf = Q<double>::nc*idx; _g[offsetf] = _q.f0[idx]; for (int c = 1; c < Q<double>::nc; ++c) { _g[offsetf + c] = _q.f[Q<double>::IndexF(idx, c)]; } } } // Collide ANS::Equilibrium<double, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz); _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0]; for (int c = 1; c < P<double>::nc; ++c) { int idxf = P<double>::IndexF(idx, c); _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c]; } Equilibrium<double, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]); _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0]; for (int c = 1; c < Q<double>::nc; ++c) { int idxf = Q<double>::IndexF(idx, c); _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c]; } } } // Function of getting sensitivity of temperature at heat source for D2Q9 template<template<class>class Q, class Fv, class Ff> void SensitivityTemperatureAtHeatSource( const double *_ux, const double *_uy, const double *_imx, const 
double *_imy, Q<double>& _q, const double *_tem, const double *_item, const double *_iqx, const double *_iqy, const double *_g, const double *_ig, double *_dfds, const double *_diffusivity, const double *_dads, const double *_dkds, Fv _qnbc, Ff _bctype ) { const int ps = Q<double>::packsize, ne = _q.nxyz/ps, nc = Q<double>::nc; auto IndexG = [=](int _idx, int _c) { return _idx < ne*ps ? (_idx/ps)*ps*nc + ps*_c + _idx%ps : nc*_idx + _c; }; // Brinkman term and diffusivity term #pragma omp parallel for for (int pidx = 0; pidx < ne; ++pidx) { int idx = pidx*ps; __m256d __dfds = _mm256_loadu_pd(&_dfds[idx]), __dads = _mm256_loadu_pd(&_dads[idx]), __dkds = _mm256_loadu_pd(&_dkds[idx]), __3 = _mm256_set1_pd(3.0); __m256d __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __imx = _mm256_loadu_pd(&_imx[idx]), __imy = _mm256_loadu_pd(&_imy[idx]); __m256d __tem = _mm256_loadu_pd(&_tem[idx]), __item = _mm256_loadu_pd(&_item[idx]), __iqx = _mm256_loadu_pd(&_iqx[idx]), __iqy = _mm256_loadu_pd(&_iqy[idx]); __dfds = _mm256_add_pd(__dfds, _mm256_mul_pd(__3, _mm256_mul_pd(__dads, _mm256_add_pd(_mm256_mul_pd(__ux, __imx), _mm256_mul_pd(__uy, __imy))))); int offsetf = nc*idx; __m256d __sumg = _mm256_setzero_pd(), __taug = _mm256_add_pd(_mm256_mul_pd(__3, _mm256_loadu_pd(&_diffusivity[idx])), _mm256_set1_pd(0.5)); for (int c = 0; c < nc; ++c) { int idxf = offsetf + ps*c; __m256d __g = _mm256_loadu_pd(&_g[idxf]), __ig = _mm256_loadu_pd(&_ig[idxf]); __sumg = _mm256_add_pd(__sumg, _mm256_mul_pd(__g, __ig)); } __dfds = _mm256_sub_pd(__dfds, _mm256_div_pd( _mm256_mul_pd(__3, _mm256_mul_pd(__dkds, _mm256_sub_pd( __sumg, _mm256_mul_pd(__tem, _mm256_add_pd(__item, _mm256_mul_pd(__3, _mm256_add_pd(_mm256_mul_pd(__ux, __iqx), _mm256_mul_pd(__uy, __iqy))))) ))), _mm256_mul_pd(__taug, __taug) )); _mm256_storeu_pd(&_dfds[idx], __dfds); } for (int idx = ne*ps; idx < _q.nxyz; ++idx) { _dfds[idx] += 3.0*_dads[idx]*(_ux[idx]*_imx[idx] + _uy[idx]*_imy[idx]); int offsetf = nc*idx; 
double sumg = 0.0; for (int c = 0; c < nc; ++c) { sumg += _g[offsetf + c]*_ig[offsetf + c]; } _dfds[idx] += -3.0/pow(3.0*_diffusivity[idx] + 0.5, 2.0)*_dkds[idx]*(sumg - _tem[idx]*(_item[idx] + 3.0*(_ux[idx]*_iqx[idx] + _uy[idx]*_iqy[idx]))); } // Boundary term along xmin if (_q.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(0 + _q.offsetx, j + _q.offsety)) { int idx = _q.Index(0, j); _dfds[idx] += _qnbc(0 + _q.offsetx, j + _q.offsety)*_dkds[idx]*( (1.0 + 3.0*_ux[idx])*(-6.0 + 4.0*_ig[IndexG(idx, 1)] + _ig[IndexG(idx, 5)] + _ig[IndexG(idx, 8)]) + 3.0*_uy[idx]*(_ig[IndexG(idx, 5)] - _ig[IndexG(idx, 8)]) )/(36.0*(1.0 - 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along xmax if (_q.PEx == _q.mx - 1) { for (int j = 0; j < _q.ny; ++j) { if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) { int idx = _q.Index(_q.nx - 1, j); _dfds[idx] += _qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety)*_dkds[idx]*( (1.0 - 3.0*_ux[idx])*(-6.0 + 4.0*_ig[IndexG(idx, 3)] + _ig[IndexG(idx, 6)] + _ig[IndexG(idx, 7)]) + 3.0*_uy[idx]*(_ig[IndexG(idx, 6)] - _ig[IndexG(idx, 7)]) )/(36.0*(1.0 + 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along ymin if (_q.PEy == 0) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, 0 + _q.offsety)) { int idx = _q.Index(i, 0); _dfds[idx] += _qnbc(i + _q.offsetx, 0 + _q.offsety)*_dkds[idx]*( (1.0 + 3.0*_uy[idx])*(-6.0 + 4.0*_ig[IndexG(idx, 2)] + _ig[IndexG(idx, 5)] + _ig[IndexG(idx, 6)]) + 3.0*_ux[idx]*(_ig[IndexG(idx, 5)] - _ig[IndexG(idx, 6)]) )/(36.0*(1.0 - 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along ymax if (_q.PEy == _q.my - 1) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) { int idx = _q.Index(i, _q.ny - 1); _dfds[idx] += _qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety)*_dkds[idx]*( (1.0 - 3.0*_uy[idx])*(-6.0 + 4.0*_ig[IndexG(idx, 4)] + _ig[IndexG(idx, 7)] + _ig[IndexG(idx, 8)]) + 3.0*_ux[idx]*(_ig[IndexG(idx, 8)] - 
_ig[IndexG(idx, 7)]) )/(36.0*(1.0 + 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Function of getting sensitivity of temperature at heat source for D3Q15 template<template<class>class Q, class Fv, class Ff> void SensitivityTemperatureAtHeatSource( const double *_ux, const double *_uy, const double *_uz, const double *_imx, const double *_imy, const double *_imz, Q<double>& _q, const double *_tem, const double *_item, const double *_iqx, const double *_iqy, const double *_iqz, const double *_g, const double *_ig, double *_dfds, const double *_diffusivity, const double *_dads, const double *_dkds, Fv _qnbc, Ff _bctype ) { const int ps = Q<double>::packsize, ne = _q.nxyz/ps, nc = Q<double>::nc; auto IndexG = [=](int _idx, int _c) { return _idx < ne*ps ? (_idx/ps)*ps*nc + ps*_c + _idx%ps : nc*_idx + _c; }; // Brinkman term and diffusivity term #pragma omp parallel for for (int pidx = 0; pidx < ne; ++pidx) { int idx = pidx*ps; __m256d __dfds = _mm256_loadu_pd(&_dfds[idx]), __dads = _mm256_loadu_pd(&_dads[idx]), __dkds = _mm256_loadu_pd(&_dkds[idx]), __3 = _mm256_set1_pd(3.0); __m256d __ux = _mm256_loadu_pd(&_ux[idx]), __uy = _mm256_loadu_pd(&_uy[idx]), __uz = _mm256_loadu_pd(&_uz[idx]); __m256d __imx = _mm256_loadu_pd(&_imx[idx]), __imy = _mm256_loadu_pd(&_imy[idx]), __imz = _mm256_loadu_pd(&_imz[idx]); __m256d __tem = _mm256_loadu_pd(&_tem[idx]), __item = _mm256_loadu_pd(&_item[idx]), __iqx = _mm256_loadu_pd(&_iqx[idx]), __iqy = _mm256_loadu_pd(&_iqy[idx]), __iqz = _mm256_loadu_pd(&_iqz[idx]); __dfds = _mm256_add_pd(__dfds, _mm256_mul_pd(__3, _mm256_mul_pd(__dads, _mm256_add_pd(_mm256_mul_pd(__ux, __imx), _mm256_add_pd(_mm256_mul_pd(__uy, __imy), _mm256_mul_pd(__uz, __imz)))))); int offsetf = nc*idx; __m256d __sumg = _mm256_setzero_pd(), __taug = _mm256_add_pd(_mm256_mul_pd(__3, _mm256_loadu_pd(&_diffusivity[idx])), _mm256_set1_pd(0.5)); for (int c = 0; c < nc; ++c) { int idxf = offsetf + ps*c; __m256d __g = _mm256_loadu_pd(&_g[idxf]), __ig = 
_mm256_loadu_pd(&_ig[idxf]); __sumg = _mm256_add_pd(__sumg, _mm256_mul_pd(__g, __ig)); } __dfds = _mm256_sub_pd(__dfds, _mm256_div_pd( _mm256_mul_pd(__3, _mm256_mul_pd(__dkds, _mm256_sub_pd( __sumg, _mm256_mul_pd(__tem, _mm256_add_pd(__item, _mm256_mul_pd( __3, _mm256_add_pd(_mm256_mul_pd(__ux, __iqx), _mm256_add_pd(_mm256_mul_pd(__uy, __iqy), _mm256_mul_pd(__uz, __iqz))) ))) ))), _mm256_mul_pd(__taug, __taug) )); _mm256_storeu_pd(&_dfds[idx], __dfds); } for (int idx = ne*ps; idx < _q.nxyz; ++idx) { _dfds[idx] += 3.0*_dads[idx]*(_ux[idx]*_imx[idx] + _uy[idx]*_imy[idx] + _uz[idx]*_imz[idx]); int offsetf = nc*idx; double sumg = 0.0; for (int c = 0; c < nc; ++c) { sumg += _g[offsetf + c]*_ig[offsetf + c]; } _dfds[idx] += -3.0/pow(3.0*_diffusivity[idx] + 0.5, 2.0)*_dkds[idx]*(sumg - _tem[idx]*(_item[idx] + 3.0*(_ux[idx]*_iqx[idx] + _uy[idx]*_iqy[idx] + _uz[idx]*_iqz[idx]))); } // Boundary term along xmin if (_q.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(0, j, k); _dfds[idx] += _qnbc(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_ux[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 1)] + _ig[IndexG(idx, 7)] + _ig[IndexG(idx, 9)] + _ig[IndexG(idx, 10)] + _ig[IndexG(idx, 12)]) + 3.0*_uy[idx]*(_ig[IndexG(idx, 7)] - _ig[IndexG(idx, 9)] + _ig[IndexG(idx, 10)] - _ig[IndexG(idx, 12)]) + 3.0*_uz[idx]*(_ig[IndexG(idx, 7)] + _ig[IndexG(idx, 9)] - _ig[IndexG(idx, 10)] - _ig[IndexG(idx, 12)]) )/(72.0*(1.0 - 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along xmax if (_q.PEx == _q.mx - 1) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(_q.nx - 1, j, k); _dfds[idx] += _qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_ux[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 4)] + _ig[IndexG(idx, 8)] + 
_ig[IndexG(idx, 11)] + _ig[IndexG(idx, 13)] + _ig[IndexG(idx, 14)]) + 3.0*_uy[idx]*(_ig[IndexG(idx, 8)] - _ig[IndexG(idx, 11)] + _ig[IndexG(idx, 13)] - _ig[IndexG(idx, 14)]) + 3.0*_uz[idx]*(_ig[IndexG(idx, 8)] - _ig[IndexG(idx, 11)] - _ig[IndexG(idx, 13)] + _ig[IndexG(idx, 14)]) )/(72.0*(1.0 + 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along ymin if (_q.PEy == 0) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, 0, k); _dfds[idx] += _qnbc(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_uy[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 2)] + _ig[IndexG(idx, 7)] + _ig[IndexG(idx, 8)] + _ig[IndexG(idx, 10)] + _ig[IndexG(idx, 13)]) + 3.0*_uz[idx]*(_ig[IndexG(idx, 7)] + _ig[IndexG(idx, 8)] - _ig[IndexG(idx, 10)] - _ig[IndexG(idx, 13)]) + 3.0*_ux[idx]*(_ig[IndexG(idx, 7)] - _ig[IndexG(idx, 8)] + _ig[IndexG(idx, 10)] - _ig[IndexG(idx, 13)]) )/(72.0*(1.0 - 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along ymax if (_q.PEy == _q.my - 1) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, _q.ny - 1, k); _dfds[idx] += _qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_uy[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 5)] + _ig[IndexG(idx, 9)] + _ig[IndexG(idx, 11)] + _ig[IndexG(idx, 12)] + _ig[IndexG(idx, 14)]) + _uz[idx]*(_ig[IndexG(idx, 9)] - _ig[IndexG(idx, 11)] - _ig[IndexG(idx, 12)] + _ig[IndexG(idx, 14)]) + _ux[idx]*(_ig[IndexG(idx, 9)] - _ig[IndexG(idx, 11)] + _ig[IndexG(idx, 12)] - _ig[IndexG(idx, 14)]) )/(72.0*(1.0 + 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along zmin if (_q.PEz == 0) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) { int idx = _q.Index(i, 
j, 0); _dfds[idx] += _qnbc(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_uz[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 3)] + _ig[IndexG(idx, 7)] + _ig[IndexG(idx, 8)] + _ig[IndexG(idx, 9)] + _ig[IndexG(idx, 14)]) + _ux[idx]*(_ig[IndexG(idx, 7)] - _ig[IndexG(idx, 8)] + _ig[IndexG(idx, 9)] - _ig[IndexG(idx, 14)]) + _uy[idx]*(_ig[IndexG(idx, 7)] + _ig[IndexG(idx, 8)] - _ig[IndexG(idx, 9)] - _ig[IndexG(idx, 14)]) )/(72.0*(1.0 - 3.0*_uz[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along zmax if (_q.PEz == _q.mz - 1) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) { int idx = _q.Index(i, j, _q.nz - 1); _dfds[idx] += _qnbc(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_uz[idx])*(-12.0 + 8.0*_ig[IndexG(idx, 6)] + _ig[IndexG(idx, 10)] + _ig[IndexG(idx, 11)] + _ig[IndexG(idx, 12)] + _ig[IndexG(idx, 13)]) + _ux[idx]*(_ig[IndexG(idx, 10)] - _ig[IndexG(idx, 11)] + _ig[IndexG(idx, 12)] - _ig[IndexG(idx, 13)]) + _uy[idx]*(_ig[IndexG(idx, 10)] - _ig[IndexG(idx, 11)] - _ig[IndexG(idx, 12)] + _ig[IndexG(idx, 13)]) )/(72.0*(1.0 + 3.0*_uz[idx])*pow(_diffusivity[idx], 2.0)); } } } } } } }
GB_unaryop__minv_int16_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_int16 // op(A') function: GB_tran__minv_int16_int16 // C type: int16_t // A type: int16_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_int16 ( int16_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { 
GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
coordinate_common.h
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #pragma once #include <algorithm> #include <string> #include <utility> #include <vector> #include <limits> #include "./param.h" #include "../common/random.h" namespace tsoobgx { namespace linear { struct CoordinateParam : public dmlc::Parameter<CoordinateParam> { int top_k; DMLC_DECLARE_PARAMETER(CoordinateParam) { DMLC_DECLARE_FIELD(top_k) .set_lower_bound(0) .set_default(0) .describe("The number of top features to select in 'thrifty' feature_selector. " "The value of zero means using all the features."); } }; /** * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the * number of training instances. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * \param w The weight. * \param reg_alpha Unnormalised L1 penalty. * \param reg_lambda Unnormalised L2 penalty. * * \return The weight update. */ inline double CoordinateDelta(double sum_grad, double sum_hess, double w, double reg_alpha, double reg_lambda) { if (sum_hess < 1e-5f) return 0.0f; const double sum_grad_l2 = sum_grad + reg_lambda * w; const double sum_hess_l2 = sum_hess + reg_lambda; const double tmp = w - sum_grad_l2 / sum_hess_l2; if (tmp >= 0) { return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w); } else { return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w); } } /** * \brief Calculate update to bias. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * * \return The weight update. */ inline double CoordinateDeltaBias(double sum_grad, double sum_hess) { return -sum_grad / sum_hess; } /** * \brief Get the gradient with respect to a single feature. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. 
*/ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to a single feature. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to the bias. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for the bias. 
*/ inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint i = 0; i < ndata; ++i) { auto &p = gpair[i * num_group + group_idx]; if (p.GetHess() >= 0.0f) { sum_grad += p.GetGrad(); sum_hess += p.GetHess(); } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Updates the gradient vector with respect to a change in weight. * * \param fidx The feature index. * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dw The change in weight. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. */ inline void UpdateResidualParallel(int fidx, int group_idx, int num_group, float dw, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dw == 0.0f) return; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; // update grad value const auto num_row = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < num_row; ++j) { GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0); } } } /** * \brief Updates the gradient vector based on a change in the bias. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dbias The change in bias. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. 
*/ inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dbias == 0.0f) return; const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { GradientPair &g = (*in_gpair)[i * num_group + group_idx]; if (g.GetHess() < 0.0f) continue; g += GradientPair(g.GetHess() * dbias, 0); } } /** * \brief Abstract class for stateful feature selection or ordering * in coordinate descent algorithms. */ class FeatureSelector { public: /*! \brief factory method */ static FeatureSelector *Create(int choice); /*! \brief virtual destructor */ virtual ~FeatureSelector() = default; /** * \brief Setting up the selector state prior to looping through features. * * \param model The model. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * \param param A parameter with algorithm-dependent use. */ virtual void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) {} /** * \brief Select next coordinate to update. * * \param iteration The iteration in a loop through features * \param model The model. * \param group_idx Zero-based index of the group. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * * \return The index of the selected feature. -1 indicates none selected. */ virtual int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) = 0; }; /** * \brief Deterministic selection by cycling through features one at a time. 
*/ class CyclicFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return iteration % model.param.num_feature; } }; /** * \brief Similar to Cyclic but with random feature shuffling prior to each update. * \note Its randomness is controllable by setting a random seed. */ class ShuffleFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { if (feat_index_.size() == 0) { feat_index_.resize(model.param.num_feature); std::iota(feat_index_.begin(), feat_index_.end(), 0); } std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom()); } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return feat_index_[iteration % model.param.num_feature]; } protected: std::vector<bst_uint> feat_index_; }; /** * \brief A random (with replacement) coordinate selector. * \note Its randomness is controllable by setting a random seed. */ class RandomFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return common::GlobalRandom()() % model.param.num_feature; } }; /** * \brief Select coordinate with the greatest gradient magnitude. * \note It has O(num_feature^2) complexity. It is fully deterministic. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). That would reduce the complexity to * O(num_feature*top_k). 
*/
class GreedyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    // `param` carries top_k; non-positive means "unrestricted".
    top_k_ = static_cast<bst_uint>(param);
    const bst_uint ngroup = model.param.num_output_group;
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    if (counter_.size() == 0) {
      counter_.resize(ngroup);
      gpair_sums_.resize(model.param.num_feature * ngroup);
    }
    // Restart the per-group selection count for this round.
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-K or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    const int ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;
    // Calculate univariate gradient sums (grad*value, hess*value^2) per
    // feature for this group. Parallel over features: each thread owns a
    // distinct gpair_sums_ slot, so there is no write conflict.
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        auto &sums = gpair_sums_[group_idx * nfeat + i];
        for (bst_uint j = 0u; j < ndata; ++j) {
          const bst_float v = col[j].fvalue;
          auto &p = gpair[col[j].index * ngroup + group_idx];
          if (p.GetHess() < 0.f) continue;  // skip excluded rows
          sums.first += p.GetGrad() * v;
          sums.second += p.GetHess() * v * v;
        }
      }
    }
    // Find a feature with the largest magnitude of weight change.
    // NOTE(review): if no feature improves (all dw == 0) this still returns
    // feature 0 rather than -1 -- matches the original behavior.
    int best_fidx = 0;
    double best_weight_update = 0.0f;
    for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
      auto &s = gpair_sums_[group_idx * nfeat + fidx];
      float dw = std::abs(static_cast<bst_float>(
          CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
      if (dw > best_weight_update) {
        best_weight_update = dw;
        best_fidx = fidx;
      }
    }
    return best_fidx;
  }

 protected:
  bst_uint top_k_;                                   // max selections per group
  std::vector<bst_uint> counter_;                    // selections made per group
  std::vector<std::pair<double, double>> gpair_sums_;  // (sum g*v, sum h*v^2)
};

/**
 * \brief Thrifty, approximately-greedy feature selector.
 *
 * \note Prior to cyclic updates, reorders features in descending magnitude of
 * their univariate weight changes. This operation is multithreaded and is a
 * linear complexity approximation of the quadratic greedy selection.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup().
 */
class ThriftyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    const bst_uint ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;

    if (deltaw_.size() == 0) {
      deltaw_.resize(nfeat * ngroup);
      sorted_idx_.resize(nfeat * ngroup);
      counter_.resize(ngroup);
      gpair_sums_.resize(nfeat * ngroup);
    }
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
      // column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        for (bst_uint gid = 0u; gid < ngroup; ++gid) {
          auto &sums = gpair_sums_[gid * nfeat + i];
          for (bst_uint j = 0u; j < ndata; ++j) {
            const bst_float v = col[j].fvalue;
            auto &p = gpair[col[j].index * ngroup + gid];
            if (p.GetHess() < 0.f) continue;
            sums.first += p.GetGrad() * v;
            sums.second += p.GetHess() * v * v;
          }
        }
      }
    }
    // rank by descending weight magnitude within the groups
    std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
    std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
    bst_float *pdeltaw = &deltaw_[0];
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      // Calculate univariate weight changes
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        auto ii = gid * nfeat + i;
        auto &s = gpair_sums_[ii];
        deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
            s.first, s.second, model[i][gid], alpha, lambda));
      }
      // sort in descending order of deltaw abs values
      auto start = sorted_idx_.begin() + gid * nfeat;
      std::sort(start, start + nfeat,
                [pdeltaw](size_t i, size_t j) {
                  return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
                });
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-N or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    // note that sorted_idx stores the "long" indices
    const size_t grp_offset = group_idx * model.param.num_feature;
    return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
  }

 protected:
  bst_uint top_k_;                  // max selections per group
  std::vector<bst_float> deltaw_;   // univariate weight change per (group, feature)
  std::vector<size_t> sorted_idx_;  // feature order, per group, by |deltaw|
  std::vector<bst_uint> counter_;   // selections made per group
  std::vector<std::pair<double, double>> gpair_sums_;
};

// Factory: maps the configured enum value to a concrete selector instance.
inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:
      return new CyclicFeatureSelector();
    case kShuffle:
      return new ShuffleFeatureSelector();
    case kThrifty:
      return new ThriftyFeatureSelector();
    case kGreedy:
      return new GreedyFeatureSelector();
    case kRandom:
      return new RandomFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;
}

}  // namespace linear
}  // namespace tsoobgx
9.norace3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 100 int main() { int sum = 0; #pragma omp master for (int i = 0; i < N; i++) { sum += i; } return sum; } // We do not support inter SCoP data races for now // CHECK: Region is Data Race Free. // END
vow.c
/********************************************************************************************
* SIDH: an efficient supersingular isogeny cryptography library
*
* Abstract: functions for van Oorschot-Wiener attack
*********************************************************************************************/

#include <omp.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "prng.h"
#include "../tests/test_extras.h"
#include "triples.h"
#include "sync_strategies.c"
#include "benchmarking.c"

// Print statements for debugging: hex-dumps a state, or "--" per byte when unset.
void print_st(st_t *s, shared_state_t *shared_state)
{
    uint64_t i;

    if (s->bytes == NULL) {
        for (i = 0; i < shared_state->NBYTES_STATE; i++)
            printf("--");
        return;
    }
    for (i = 0; i < shared_state->NBYTES_STATE; i++)
        printf("%02x", s->bytes[i]);
}

/**
 * @brief runs one "iteration" of vOW: sampling a point, checking for distinguishedness and possibly backtracking
 *
 * @param S shared state pointer
 * @param private_state private state pointer
 * @param t temporary triple pointer
 * @param success pointer to global success variable
 * @return true vOW terminated, break out of loop
 * @return false keep looping
 */
static inline bool vOW_one_iteration(
    shared_state_t *S,
    private_state_t *private_state,
    trip_t *t,
    bool *success,
    double ratio_of_points_to_mine)
{
    // printf("mine a point (1 vow iteration)\n");
    // Walk to the next point using the current random function
    update(private_state);
    private_state->current.current_steps += 1;

    // Check if the new point is distinguisihed
    if (distinguished(private_state)) {
        // Found a distinguished point. Try backtracking if unsuccessful, sample a new starting point
        uint64_t id;
        bool read;
        bool res;

        private_state->current_dist++;
        private_state->dist_points++;  // S->current_dist gets reset, this doesn't

        id = mem_index(private_state);
        copy_trip(&private_state->trip, &S->memory[id], private_state->NWORDS_STATE);
        // A stored triple with zero steps means the slot was never written.
        read = (private_state->trip.current_steps > 0);

        // Did not get a collision in value, hence it was just a memory address collision
        if (!read || !is_equal_st(&private_state->trip.current_state, &private_state->current.current_state, private_state->NWORDS_STATE)) {
            private_state->mem_collisions += 1;
        } else {
            // Not a simple memory collision, backtrack!
            // printf("Collision ");
            // print_st(&private_state->trip.current_state, S);
            // printf("\n");
            copy_trip(t, &private_state->current, private_state->NWORDS_STATE);
            res = backtrack(&private_state->trip, t, S, private_state);

            // Only check for success when not running for stats
            // NOTE(review): *success is read/written by all threads without
            // atomics; presumably treated as a benign monotonic flag -- confirm.
            if (!private_state->collect_vow_stats) {
                if (res || *success) {
                    *success = true;
                    return true;
                }
            }
        }
        // Didn't get the golden collision, write the current distinguished point to memory
        // and sample a new starting point
        write_to_memory(&private_state->current, S, private_state, id);
        sample(private_state);
    }

    // Check if enough points have been mined for the current random function
    if (private_state->current_dist >= private_state->MAX_DIST * ratio_of_points_to_mine) {
        // Enough points collected for this random function
        if (!private_state->collect_vow_stats) {
#if defined(STAKHANOVIST_SYNC)
            if (stakhanovist_resync_should_resync(S, private_state)) {
                sample(private_state);
                update_random_function(S, private_state);
                stakhanovist_resync_do_resync(S, private_state);
            }
#elif defined(WINDOWED_SYNC)
            // In real attack. Sample a new starting point and random function
            sample(private_state);
            update_random_function(S, private_state);
            private_state->random_functions++;  // maybe this could be merged with update_random_function
            private_state->current_dist = 0;
#elif defined(NOBIGGIE_SYNC)
            if (nobiggie_resync_should_resync(S, private_state, success)) {
                // Resync, no thread has found the solution in this function version, so barriers inside this scope would be hit by all
                nobiggie_resync_do_resync(S, private_state);
// Wait for 0 to reset S->resync_cores inside resync
#pragma omp barrier
            } else {
                // Some core found the solution while waiting
                return true;
            }
#endif
        } else {
            // we are collecting stats only for one random function, can stop vOW
            return true;
        }
    }

    if (private_state->current.current_steps >= private_state->MAX_STEPS) {
        // Walked too long without finding a new distinguished point
        // hence, sample a new starting point
        sample(private_state);
    }

    return false;
}

#if (OS_TARGET == OS_LINUX)
// Handle Ctrl+C to stop prematurely and collect statistics
// NOTE(review): written from a signal handler; the C standard only guarantees
// `volatile sig_atomic_t` for this -- consider changing the type.
bool ctrl_c_pressed = false;
void sigintHandler(int sig_num)
{
    /* Refer http://en.cppreference.com/w/c/program/signal */
    ctrl_c_pressed = true;
}
#endif

// Top-level attack driver: benchmarks cores to split the work, then runs
// vOW_one_iteration in a loop on every OpenMP thread, and finally reduces
// per-thread statistics into the shared state S. Returns whether the golden
// collision was found.
bool vOW(shared_state_t *S)
{
    bool success = false;
    double start_wall_time = omp_get_wtime();
    double *points_ratio = NULL;
    S->cpu_cycles = -cpu_cycles();

    // Explicitly disable dynamic teams (ensures running on S->N_OF_CORES cores)
    omp_set_dynamic(0);

    // Runs cores benchmarks (across remote machines if used) to allocate work
    points_ratio = (double *)malloc(S->N_OF_CORES * sizeof(double));
    if (points_ratio == NULL) {
        fprintf(stderr, "error: could not alloc points_ratio memory");
        goto end;
    }
    run_benchmark(points_ratio, S->instance, 5000);

    // Runs the real attack
#pragma omp parallel num_threads(S->N_OF_CORES)
    {
        private_state_t private_state;
        init_private_state(S, &private_state);
        // Per-thread share of the distinguished-point quota, from benchmarking.
        double ratio_of_points_to_mine = points_ratio[private_state.thread_id];
        double internal_cpu_time = omp_get_wtime();
        initialize_private_memory(S, &private_state);
        trip_t t = init_trip(private_state.NWORDS_STATE);

#if (OS_TARGET == OS_LINUX)
        // Set a Ctrl+C handler to dump statistics
        signal(SIGINT, sigintHandler);
#endif

        // while we haven't exhausted the random functions to try
        while (private_state.random_functions <= private_state.MAX_FUNCTION_VERSIONS && !success) {
#if (OS_TARGET == OS_LINUX)
            if (ctrl_c_pressed) {
                printf("\n%d: thinks ctrl+c was pressed", private_state.thread_id);
                break;
            }
#endif
#if defined(WINDOWED_SYNC)
            // "Windowed" resync
            windowed_resync(S, &private_state);
#endif
            // Mine new points
            if (vOW_one_iteration(S, &private_state, &t, &success, ratio_of_points_to_mine)) {
                break;
            }
        }
        internal_cpu_time = omp_get_wtime() - internal_cpu_time;

        // Collect all the stats from each thread
#pragma omp critical
        {
            S->collisions += private_state.collisions;
            S->mem_collisions += private_state.mem_collisions;
            S->dist_points += private_state.dist_points;
            S->number_steps_collect += private_state.number_steps_collect;
            S->number_steps_locate += private_state.number_steps_locate;
            S->number_steps = S->number_steps_collect + S->number_steps_locate;
            S->total_time += internal_cpu_time;
            S->final_avg_random_functions += (double)private_state.random_functions / (double)S->N_OF_CORES;
        }
        free_trip(&t);
        cleanup_private_memory(&private_state);
        free_private_state(&private_state);
    }
end:
#if (OS_TARGET == OS_LINUX)
    ctrl_c_pressed = false;
#endif
    S->cpu_cycles += cpu_cycles();
    free(points_ratio);
    S->success = success;
    S->wall_time = omp_get_wtime() - start_wall_time;

    return success;
}
spmmd_x_csc_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Dense C (row-major, leading dimension ldc) = sparse A * sparse B,
 * with both A and B in CSC format. C is m x n where m = A->rows and
 * n = B->cols; A->cols must equal B->rows.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSC *matA, const ALPHA_SPMAT_CSC *matB, ALPHA_Number *matC, const ALPHA_INT ldc)
{
    /* Inner dimensions must agree. */
    if (matA->cols != matB->rows)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT m = matA->rows; /* rows of C (was previously computed but unused) */
    const ALPHA_INT n = matB->cols; /* columns of C */
    ALPHA_INT num_thread = alpha_get_thread_num();

    /* Zero-initialize the m x n output. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT i = 0; i < m; i++)
    {
        for (ALPHA_INT j = 0; j < n; j++)
        {
            alpha_setzero(matC[index2(i, j, ldc)]);
        }
    }

    /* For every nonzero B(ac, bc), accumulate C(:, bc) += A(:, ac) * B(ac, bc).
       Parallelizing over B's columns is race-free: each thread writes a
       distinct set of output columns. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT bc = 0; bc < n; bc++)
    {
        for (ALPHA_INT bi = matB->cols_start[bc]; bi < matB->cols_end[bc]; bi++)
        {
            ALPHA_INT ac = matB->row_indx[bi]; // ac == br
            ALPHA_Number bv;
            bv = matB->values[bi];
            for (ALPHA_INT ai = matA->cols_start[ac]; ai < matA->cols_end[ac]; ai++)
            {
                ALPHA_INT ar = matA->row_indx[ai];
                ALPHA_Number av;
                av = matA->values[ai];
                ALPHA_Number tmp;
                alpha_mul(tmp, av, bv);
                alpha_adde(matC[index2(ar, bc, ldc)], tmp);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
softplus_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"

#include <math.h>

/* Reference fp32 softplus: dst = log(1 + exp(src)), elementwise,
 * channel-parallel over `num_thread` OpenMP threads. Returns 0 on success. */
int ref_softplus_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* Output shape mirrors the input (see reshape()); read all dims from the
       input tensor for consistency (the original mixed input/output here). */
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    /* NOTE(review): the batch dimension dims[0] is not iterated; this assumes
       batch == 1 -- confirm against callers. */
    int size = h * w;
    int c_step = h * w;

    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;

        for (int i = 0; i < size; i++)
        {
            /* softplus(x) = log(1 + e^x). The naive log(exp(x) + 1) overflows
               to inf for x > ~88 even though softplus(x) ~= x there; log1p
               also keeps precision when exp(x) is tiny. */
            double x = src[i];
            dst[i] = (float)(x > 88.0 ? x : log1p(exp(x)));
        }
    }

    return 0;
}

/* No per-node state to set up for this reference op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to release; see init_node. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch: fetch the node's input/output tensors and run the fp32 kernel.
 * Only TENGINE_DT_FP32 is supported; other dtypes return -1. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_softplus_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else
        printf("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Softplus is shape-preserving: output dims = input dims. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);

    return ret;
}

/* Reference implementation: always claims "can do" with baseline priority. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {
    .prerun = NULL,
    .run = run,
    .reshape = reshape,
    .postrun = NULL,
    .init_node = init_node,
    .release_node = release_node,
    .score = score
};

int register_softplus_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}

int unregister_softplus_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}
openMPMandelbrot.c
/*
 c program:
 --------------------------------
  1. draws Mandelbrot set for Fc(z)=z*z +c
  using Mandelbrot algorithm ( boolean escape time )
 -------------------------------
 2. technique of creating ppm file is  based on the code of Claudio Rocchini
 http://en.wikipedia.org/wiki/Image:Color_complex_plot.jpg
 create 24 bit color graphic file ,  portable pixmap file = PPM
 see http://en.wikipedia.org/wiki/Portable_pixmap
 to see the file use external application ( graphic viewer)
*/
#include <stdio.h>
#include <math.h>
#include <omp.h>

int main()
{
    /* screen (integer) coordinate bounds */
    const int iXmax = 51200;
    const int iYmax = 51200;
    /* world (double) coordinate = parameter plane */
    const double CxMin = -2.5;
    const double CxMax = 1.5;
    const double CyMin = -2.0;
    const double CyMax = 2.0;
    double PixelWidth = (CxMax - CxMin) / iXmax;
    double PixelHeight = (CyMax - CyMin) / iYmax;
    /* color component (R or G or B) is coded from 0 to 255;
       it is 24 bit color RGB file */
    const int MaxColorComponentValue = 255; /* used by the (commented-out) PPM header */
    FILE *fp;
    char *filename = "new1.ppm";
    char *comment = "# "; /* comment should start with # */
    /* bail-out value, radius of circle */
    const int IterationMax = 500;
    const double EscapeRadius = 2;
    double ER2 = EscapeRadius * EscapeRadius;

    /* create new file, give it a name and open it in binary mode */
    //fp= fopen(filename,"wb"); /* b - binary mode */
    /* write ASCII header to the file */
    //fprintf(fp,"P6\n %s\n %d\n %d\n %d\n",comment,iXmax,iYmax,MaxColorComponentValue);

    /*
     BUG FIX: the original declared iX, color, Cx, Cy, Zx, Zy, Zx2, Zy2 and
     Iteration at function scope and listed most of them in private(...), but
     the inner loop counter iX was missing from the clause and therefore
     shared across threads -- a data race that corrupted the per-pixel
     computation. Declaring all per-pixel state inside the parallel loop
     makes it private by construction.
     NOTE(review): if the fwrite below is re-enabled, pixel output must be
     serialized/ordered -- a parallel-for writes rows out of order.
    */
#pragma omp parallel for
    for (int iY = 0; iY < iYmax; iY++)
    {
        unsigned char color[3];
        double Cy = CyMin + iY * PixelHeight;
        if (fabs(Cy) < PixelHeight / 2)
            Cy = 0.0; /* Main antenna */
        for (int iX = 0; iX < iXmax; iX++)
        {
            double Cx = CxMin + iX * PixelWidth;
            /* initial value of orbit = critical point Z = 0 */
            double Zx = 0.0;
            double Zy = 0.0;
            double Zx2 = Zx * Zx;
            double Zy2 = Zy * Zy;
            int Iteration;
            for (Iteration = 0; Iteration < IterationMax && ((Zx2 + Zy2) < ER2); Iteration++)
            {
                Zy = 2 * Zx * Zy + Cy;
                Zx = Zx2 - Zy2 + Cx;
                Zx2 = Zx * Zx;
                Zy2 = Zy * Zy;
            }
            /* compute pixel color (24 bit = 3 bytes) */
            if (Iteration == IterationMax)
            { /* interior of Mandelbrot set = black */
                color[0] = 0;
                color[1] = 0;
                color[2] = 0;
            }
            else
            { /* exterior of Mandelbrot set = white */
                color[0] = 255; /* Red */
                color[1] = 255; /* Green */
                color[2] = 255; /* Blue */
            }
            /* write color to the file */
            //fwrite(color,1,3,fp);
        }
    }
    //fclose(fp);
    return 0;
}
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include 
"MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImage() returns a pointer to an image structure initialized to
%  default values.
%
%  The format of the AcquireImage method is:
%
%      Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireCriticalMemory(sizeof(*image));
  (void) memset(image,0,sizeof(*image));
  /*
    Initialize Image structure.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  /* sRGB primaries and D65 white point. */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=GetMagickTime();
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      /* Offsets belong to image->offset, not to the extract rectangle. */
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      (void) memset(&geometry,0,sizeof(geometry));
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* "XxY" density; Y defaults to X when sigma is absent. */
      flags=ParseGeometry(image_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          /* ">" clamps the delay from above. */
          if ((double) image->delay > floor(geometry_info.rho+0.5))
            image->delay=(size_t) CastDoubleToLong(floor(
              geometry_info.rho+0.5));
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* NOTE(review): this branch assigns ticks_per_second from sigma
               instead of adjusting image->delay -- looks asymmetric with the
               ">" branch; confirm against upstream intent. */
            if ((double) image->delay < floor(geometry_info.rho+0.5))
              image->ticks_per_second=CastDoubleToLong(floor(
                geometry_info.sigma+0.5));
          }
        else
          image->delay=(size_t) CastDoubleToLong(floor(geometry_info.rho+0.5));
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=CastDoubleToLong(floor(
          geometry_info.sigma+0.5));
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e I m a g e I n f o                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireImageInfo() allocates the ImageInfo structure.
%
%  The format of the AcquireImageInfo method is:
%
%      ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *image_info;

  image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
  GetImageInfo(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e N e x t I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireNextImage() initializes the next image in a sequence to
%  default values.  The next member of image points to the newly allocated
%  image.  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /*
    Default the next frame's filename to this frame's; an explicit
    image_info filename (when provided) takes precedence below.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MagickPathExtent);
  /*
    Share this frame's blob with the next frame and link it into the list.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  ImageType
    image_type;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    First pass over the list: accumulate the canvas size (sum of rows and max
    of columns when stacking, the converse when appending left-to-right) and
    detect whether type/colorspace/alpha are uniform across the sequence.
  */
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  image_type=images->type;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->type != images->type)
      image_type=UndefinedType;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;  /* any alpha anywhere => blended result */
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (image_type != BilevelType)
    {
      if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
        {
          append_image=DestroyImage(append_image);
          return((Image *) NULL);
        }
      /* Mixed colorspaces are reconciled to sRGB on the result canvas. */
      if (homogeneous_colorspace == MagickFalse)
        (void) SetImageColorspace(append_image,sRGBColorspace,exception);
    }
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  /*
    Second pass: copy each source image onto the canvas at a running offset,
    adjusted for the source image's gravity.
  */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(status) \
      magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      const Quantum
        *magick_restrict p;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      /* A failure in any thread short-circuits remaining rows. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      /* Convert through PixelInfo so differing channel maps interoperate. */
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the placement cursor for the next image in the sequence. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    NOTE(review): the exception reported here is freshly acquired and is not
    populated from `image`, so `severity` reflects only what CatchException()
    finds on the new ExceptionInfo — confirm this matches the documented
    "most severe exception in the sequence" contract.
  */
  exception=AcquireExceptionInfo();
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based on any clipping path
%  information if it exists.
% % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if 
(SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside != MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); image->mask_trait=UpdatePixelTrait; clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale_x, scale_y; Image *clone_image; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); 
clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale_x=1.0; scale_y=1.0; if (image->columns != 0) scale_x=(double) columns/(double) image->columns; if (image->rows != 0) scale_y=(double) rows/(double) image->rows; clone_image->page.width=(size_t) CastDoubleToLong(floor(scale_x* image->page.width+0.5)); clone_image->page.height=(size_t) CastDoubleToLong(floor(scale_y* image->page.height+0.5)); if (MagickAbsoluteValue(scale_x-scale_y) < 2.0) scale_x=scale_y=MagickMin(scale_x,scale_y); clone_image->page.x=CastDoubleToLong(ceil(scale_x*image->page.x-0.5)); clone_image->tile_offset.x=CastDoubleToLong(ceil(scale_x* image->tile_offset.x-0.5)); 
clone_image->page.y=CastDoubleToLong(ceil(scale_y*image->page.y-0.5)); clone_image->tile_offset.y=CastDoubleToLong(ceil(scale_y* image->tile_offset.y-0.5)); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char 
*) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) 
CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o p y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CopyImagePixels() copies pixels from the source image as defined by the % geometry the destination image at the specified offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception); % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Copy image pixels. 
*/ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,source_image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) geometry->width; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0) || (source_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CopyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); image->channel_map=DestroyPixelChannelMap(image->channel_map); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelInfo *) NULL) image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info *) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); DestroyBlob(image); if (image->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y 
I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images. If the reference
%  count is higher then 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Delegate to the blob layer, which handles the reference counting. */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /* Zero the whole structure, then set the non-zero defaults explicitly. */
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override when present. */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /* Resolve the default colors; a scratch exception collects any warnings. */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor; may return NULL when no file has been associated. */
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Return NULL when the requested mask channel is not enabled. */
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /*
    Build a grayscale image the size of the source and copy the selected
    mask channel into its gray channel, row by row.
  */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  /* NOTE(review): mask_image is not NULL-checked before use — confirm
     AcquireImage() cannot return NULL here. */
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Read under the image semaphore for a consistent snapshot. */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. % % The format of the InterpretImageFilename method is: % % size_t InterpretImageFilename(const ImageInfo *image_info,Image *image, % const char *format,int value,char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info.. % % o image: the image. % % o format: A filename describing the format to use to write the numeric % argument. Only the first numeric format identifier is replaced. % % o value: Numeric value to substitute into format filename. % % o filename: return the formatted filename in this character buffer. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  const char
    *p;

  int
    c;

  MagickBooleanType
    canonical;

  ssize_t
    field_width,
    offset;

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  /* honor the "filename:literal" option: no % substitution at all */
  if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
    return(strlen(filename));
  /* scan the format for % escapes; %% is a literal percent */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);  /* e.g. %03d */
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution (%d/%o/%x).
          NOTE(review): the '\0' is temporarily written THROUGH the const
          format pointer so the prefix can be used as a printf format --
          the caller's buffer must be writable; verify against callers.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        char
          *r;

        ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option substitution: %[filename:...] only.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /* copy the bracketed pattern, tracking nested [] depth */
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;  /* only filename: properties are substituted here */
        /* resolve: image property, then artifact, then image-info option */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';  /* NOTE(review): again writes through const format */
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        /* +3 accounts for the %[ and ] characters consumed */
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    /* collapse remaining %% escapes to a single % */
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s   H i g h   D y n a m i c   R a n g e   I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image, ExceptionInfo *exception) { #if !defined(MAGICKCORE_HDRI_SUPPORT) (void) image; (void) exception; return(MagickFalse); #else CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; pixel=(double) p[i]; if ((pixel < 0.0) || (pixel > QuantumRange) || (pixel != (double) ((QuantumAny) pixel))) break; } p+=GetPixelChannels(image); if (i < (ssize_t) GetPixelChannels(image)) status=MagickFalse; } if (x < (ssize_t) image->columns) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status != MagickFalse ? MagickFalse : MagickTrue); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O b j e c t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageObject() returns MagickTrue if the image sequence contains a valid % set of image objects. 
% % The format of the IsImageObject method is: % % MagickBooleanType IsImageObject(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageObject(const Image *image) { const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) if (p->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTaintImage() returns MagickTrue any pixel in the image has been altered % since it was first constituted. % % The format of the IsTaintImage method is: % % MagickBooleanType IsTaintImage(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MagickPathExtent], filename[MagickPathExtent]; const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); (void) CopyMagickString(magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
% % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePixels() reset the image pixels, that is, all the pixel components % are zereod. % % The format of the SetImage method is: % % MagickBooleanType ResetImagePixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ResetImagePixels(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; size_t length; ssize_t y; void *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); pixels=AcquirePixelCachePixels(image,&length,exception); if (pixels != (void *) NULL) { /* Reset in-core image pixels. */ (void) memset(pixels,0,length); return(MagickTrue); } /* Reset image pixels. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum)); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlpha() sets the alpha levels of the image. % % The format of the SetImageAlpha method is: % % MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: the level of transparency: 0 is fully transparent and QuantumRange % is fully opaque. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) > (QuantumRange/2)) SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo background; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlphaChannel(image,OnAlphaChannel,exception); ConformPixelInfo(image,&image->background_color,&background,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. 
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* thin wrapper; returns the previous channel mask */
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   C o l o r                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /* inherit colorspace and traits from the fill color */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   S t o r a g e   C l a s s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  /* re-sync the pixel cache so it reflects the new storage class */
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   E x t e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth == 0) { image->depth=8; (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } if (image->depth > (8*sizeof(MagickSizeType))) { image->depth=8*sizeof(MagickSizeType); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the 'magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, 'ps:image' returns PS indicating a Postscript image. % JPEG is returned for this filename: 'image.jpg'. The filename prefix has % precendence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Helper: derive the image format from a filename extension (or an explicit
  user-specified format string).  Copies the uppercased component into magic,
  updates image_info->magick / image_info->affirm, and returns the matching
  MagickInfo (or NULL if the format is unknown).
*/
static const MagickInfo *SetImageInfoFromExtension(ImageInfo *image_info,
  const char *component,char *magic,ExceptionInfo *exception)
{
  const MagickInfo
    *magick_info;

  MagickFormatType
    format_type;

  ssize_t
    i;

  static const char
    *format_type_formats[] =
    {
      "AUTOTRACE",
      "BROWSE",
      "DCRAW",
      "EDIT",
      "LAUNCH",
      "MPEG:DECODE",
      "MPEG:ENCODE",
      "PRINT",
      "PS:ALPHA",
      "PS:CMYK",
      "PS:COLOR",
      "PS:GRAY",
      "PS:MONO",
      "SCAN",
      "SHOW",
      "WIN",
      (char *) NULL
    };

  /*
    User specified image format.
  */
  (void) CopyMagickString(magic,component,MagickPathExtent);
  LocaleUpper(magic);
  /*
    Look for explicit image formats.
  */
  format_type=UndefinedFormatType;
  magick_info=GetMagickInfo(magic,exception);
  if ((magick_info != (const MagickInfo *) NULL) &&
      (magick_info->format_type != UndefinedFormatType))
    format_type=magick_info->format_type;
  /* fall back to the static list of explicit (delegate-style) formats */
  i=0;
  while ((format_type == UndefinedFormatType) &&
         (format_type_formats[i] != (char *) NULL))
  {
    if ((*magic == *format_type_formats[i]) &&
        (LocaleCompare(magic,format_type_formats[i]) == 0))
      format_type=ExplicitFormatType;
    i++;
  }
  if (format_type == UndefinedFormatType)
    (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
  else
    if (format_type == ExplicitFormatType)
      {
        image_info->affirm=MagickTrue;
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      }
  if (LocaleCompare(magic,"RGB") == 0)
    image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
  return(magick_info);
}

MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    path[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene ranges (e.g. "1,3-5"),
            tracking the overall minimum scene and maximum scene number.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* convert max scene number into a count relative to scene */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
  if (*component != '\0')
    {
      /*
        Base path sans any compression extension.
      */
      GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
      GetPathComponent(path,ExtensionPath,component);
    }
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    magick_info=SetImageInfoFromExtension(image_info,component,magic,
      sans_exception);
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /* no explicit prefix: fall back to whatever magick is already set */
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=(const DelegateInfo *) NULL;
      if (magick_info == (const MagickInfo *) NULL)
        {
          delegate_info=GetDelegateInfo(magic,"*",sans_exception);
          if (delegate_info == (const DelegateInfo *) NULL)
            delegate_info=GetDelegateInfo("*",magic,sans_exception);
          if ((delegate_info == (const DelegateInfo *) NULL) &&
              ((*component != '\0') && (IsGlob(component) == MagickFalse)))
            {
              /*
                Retry in case GetMagickInfo loaded a custom module.
              */
              magick_info=SetImageInfoFromExtension(image_info,component,magic,
                sans_exception);
            }
        }
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          /* the prefix is authoritative: affirm and strip it from filename */
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          /* mark temporary so the caller knows to clean it up */
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireQuantumMemory(1,magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      /* sniff the leading bytes, then rewind the blob */
      (void) memset(magick,0,magick_size);
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the
            extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   I n f o   B l o b                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    The blob is referenced, not copied: the caller retains ownership and must
    keep it alive for as long as this ImageInfo may use it.
  */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o C u s t o m S t r e a m                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoCustomStream() sets the image info custom stream handlers.
%
%  The format of the SetImageInfoCustomStream method is:
%
%      void SetImageInfoCustomStream(ImageInfo *image_info,
%        CustomStreamInfo *custom_stream)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* stored by reference; caller retains ownership of the stream handlers */
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoFile() sets the image info file member.
%
%  The format of the SetImageInfoFile method is:
%
%      void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o file: the file.
% */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { 
SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o geometry: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (region == (const RectangleInfo *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; pixel=QuantumRange; if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) && ((y >= region->y) && (y < (region->y+(ssize_t) region->height)))) pixel=(Quantum) 0; switch (type) { case ReadPixelMask: { SetPixelReadMask(image,pixel,q); break; } case WritePixelMask: { SetPixelWriteMask(image,pixel,q); break; } default: { SetPixelCompositeMask(image,pixel,q); break; } } q+=GetPixelChannels(image); } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. 
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SmushXGap() scans the transparent margin between the previous image's right
  edge and the current image's left edge, and returns how far the images can
  be pushed together horizontally (gap minus the requested offset).  Returns
  0 for the first image in the list.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;  /* upper bound on the transparent gap */
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /*
      Count trailing transparent pixels on this row of the left image.
    */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /*
      Count leading transparent pixels on this row of the right image.
    */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);  /* gap is the minimum over all rows */
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap() is the vertical analog of SmushXGap(): it measures the
  transparent margin between the previous (top) image's bottom edge and the
  current (bottom) image's top edge.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /*
      Count trailing transparent pixels in this column of the top image.
    */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /*
      Count leading transparent pixels in this column of the bottom image.
    */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* stacked: width is the max, heights (plus offsets) accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* side-by-side: widths (plus offsets) accumulate, height is the max */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        /* pull the image up by the measured transparent gap */
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        /* pull the image left by the measured transparent gap */
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* trim the canvas to the actual extent used */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  magick_unreferenced(exception);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Discard every profile and the standard identifying properties, then
    request that the PNG encoder omit ancillary chunks on write.
  */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  return(SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  PushColormapIndex() returns the index unchanged when it lies inside the
  colormap; otherwise it flags a range exception and returns index 0.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);  /* ping mode has no pixel data to synchronize */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);  /* only colormapped images have indexes to expand */
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;  /* preserved: expanding indexes is not a user edit */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* replace each colormap index with its RGB(A) colormap entry */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  /* apply the global options to every image in the list */
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  /* "page" is consumed here so it applies only once per sequence */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.blue_primary.x=geometry_info.rho;
      /* y defaults to x, then sigma overrides it when given */
      image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.blue_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.green_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.red_primary.y=geometry_info.sigma;
    }
  /* an explicit image_info quality overrides the "quality" option above */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /*
        Convert the stored resolution when the unit system changes, then
        re-apply any explicit "density" in the new units.
      */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* rounded to two decimal places */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & RhoValue) != 0)
            image->resolution.x=geometry_info.rho;
          image->resolution.y=image->resolution.x;
          if ((flags & SigmaValue) != 0)
            image->resolution.y=geometry_info.sigma;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=image->chromaticity.white_point.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.white_point.y=geometry_info.sigma;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a
    global option setting/define.  This saves a lot of duplication of global
    options into per-image artifacts, while ensuring only specifically set
    per-image artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
calib.c
/* Copyright 2013-2016. The Regents of the University of California. * Copyright 2016. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2013 Dara Bahri <dbahri123@gmail.com> * 2015-2016 Siddharth Iyer <sid8795@gmail.com> * * * Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M. * ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE * meets GRAPPA. Magn Reson Med, 71:990-1001 (2014) * * Iyer S, Ong F, Lustig M. * Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation. * Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016. * http://www.ismrm.org/16/program_files/O86.htm * */ #include <assert.h> #include <complex.h> #include <math.h> #include <stdbool.h> #include "num/multind.h" #include "num/fft.h" #include "num/flpmath.h" #include "num/linalg.h" #include "num/lapack.h" #include "num/casorati.h" #include "num/rand.h" #include "misc/misc.h" #include "misc/mri.h" #include "misc/resize.h" #include "misc/debug.h" #include "misc/utils.h" #include "calib/calmat.h" #include "calib/cc.h" #include "calib/softweight.h" #include "calib.h" #ifdef USE_CUDA #include "calib/calibcu.h" #endif #if 0 #define CALMAT_SVD #endif #if 0 #define FLIP #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N]) // ordering might be different to herm2 { complex float mout[M][N]; for (int li = 0; li < N; li++) for (int lj = 0; lj < li; lj++) matrix[lj][li] = conj(matrix[li][lj]); //mat_identity(M, N, mout); orthiter(M, N, 30, val, mout, matrix); for (int i = 0; i < M; i++) for (int j = 0; j < N; j++) matrix[i][j] = mout[i][j]; } static float scurve(float x) { if (x <= -1.) return 0.; if (x >= 1.) return 1.; return 0.5 * (1. + 2. 
* x / (1. + powf(x, 2.)));
}

/* Soft crop weight: maps an eigenvalue magnitude val into [0, 1] via the
 * s-curve, ramping up between the threshold crth and 1. */
static float crop_weight_function(float crth, float val)
{
	return scurve((sqrtf(val) - crth) / (1. - crth));
}

/* Hard crop weight: 0 below/at the threshold, 1 above it. */
static float crop_thresh_function(float crth, float val)
{
	return (val <= crth) ? 0. : 1.;
}

/* Pointwise weighting rule: (threshold, value) -> multiplicative weight. */
typedef float (*weight_function)(float crth, float val);

/* Scale ptr (x,y,z,coil,map array) pointwise: every coil channel at a given
 * (x,y,z,map) position is multiplied by fun(crth, |map value|), where map
 * holds one value per (x,y,z,map) position (e.g. an eigenvalue map).
 *
 * dims - dimensions of ptr; dims[0..2] spatial, dims[3] coils, dims[4] maps.
 * Only the first 5 dimensions may be non-trivial.
 */
static void crop_weight(const long dims[DIMS], complex float* ptr, weight_function fun, float crth, const complex float* map)
{
	long xx = dims[0];
	long yy = dims[1];
	long zz = dims[2];
	long cc = dims[3];
	long mm = dims[4];

	assert(DIMS > 5);
	assert(1 == md_calc_size(DIMS - 5, dims + 5));

	for (long m = 0; m < mm; m++) {
#pragma omp parallel for
		for (long k = 0; k < zz; k++) {
			for (long i = 0; i < yy; i++) {
				for (long j = 0; j < xx; j++) {

					// One weight per spatial position and map...
					float val = cabsf(map[((m * zz + k) * yy + i) * xx + j]);

					// ...applied to all coil channels.
					for (long c = 0; c < cc; c++)
						ptr[(((m * cc + c) * zz + k) * yy + i) * xx + j] *= fun(crth, val);
				}
			}
		}
	}
}

/* Crop sensitivity maps against the eigenvalue map: soft (s-curve) or hard
 * thresholding at crth. */
void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map)
{
	crop_weight(dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map);
}

/**
 * sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter
 * Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the
 * session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
 *
 * Parameters:
 *  var         - Estimated variance in data.
 *  evec_dims   - The eigenvector dimensions.
 *  evec_data   - The eigenvectors.
 *  eptr        - The eigenvalues.
 *  calreg_dims - Dimension of the calibration region.
 *  calreg      - Calibration data.
*/
static float sure_crop(float var, const long evec_dims[5], complex float* evec_data, complex float* eptr, const long calreg_dims[5], const complex float* calreg)
{
	long num_maps = evec_dims[4];

	// Construct low-resolution image: zero-pad the calibration region to
	// image size and inverse-FFT (flags 15 keep dims 0-3).
	long im_dims[5];
	md_select_dims(5, 15, im_dims, evec_dims);

	complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);
	md_clear(5, im_dims, im, CFL_SIZE);
	md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE);
	ifftuc(5, im_dims, FFT_FLAGS, im, im);

	// Temporary vector for crop dimensions (calibration-sized, one per map)
	long cropdims[5];
	md_select_dims(5, 15, cropdims, calreg_dims);
	cropdims[4] = num_maps;

	// Eigenvectors (M)
	complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);
	md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

	// Temporary eigenvector holder to hold low resolution maps
	complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);

	// Temporary holder for projection calreg
	complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg);

	// Temporary holder to hold low resolution calib maps
	complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg);

	// Eigenvalues (W); flags 23 keep dims 0-2 and 4 (space + maps, no coils).
	long W_dims[5];
	md_select_dims(5, 23, W_dims, evec_dims);

	complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);
	md_copy(5, W_dims, W, eptr, CFL_SIZE);

	// Place holder for the inner product result
	complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);

	// Place holder for the projection result
	complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);

	// Place holder for divergence term (scalar)
	long div_dims[5] = MD_INIT_ARRAY(5, 1);
	complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg);

	// Calculating strides.
	long str1_ip[5];
	long str2_ip[5];
	long stro_ip[5];

	md_calc_strides(5, str1_ip, im_dims, CFL_SIZE);
	md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_ip, W_dims, CFL_SIZE);

	long str1_proj[5];
	long str2_proj[5];
	long stro_proj[5];

	md_calc_strides(5, str1_proj, W_dims, CFL_SIZE);
	md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_proj, im_dims, CFL_SIZE);

	long str1_div[5];
	long str2_div[5];
	long stro_div[5];

	md_calc_strides(5, str1_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, str2_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_div, div_dims, CFL_SIZE);

	// Broadcast dimensions for the inner-product and projection fmacs.
	long tdims_ip[5];
	long tdims_proj[5];

	for (unsigned int i = 0; i < 5; i++) {

		// Dimensions must match or be broadcastable (singleton).
		assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i]));
		assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i]));

		tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i];
		tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i];
	}

	// Starting parameter sweep with SURE.
	// Bisection-style search over the crop threshold c: step s is halved
	// and sign-flipped whenever the estimated MSE stops decreasing.
	float mse = -1.;
	float old_mse = 0.;

	float s = -0.1;
	float c = 0.99;

	long ctr1 = 0;	// outer (step-refinement) iteration counter
	long ctr2 = 0;	// inner (sweep) iteration counter

	debug_printf(DP_INFO, "---------------------------------------------\n");
	debug_printf(DP_INFO, "| CTR1 | CTR2 | Crop | Est. MSE |\n");
	debug_printf(DP_INFO, "---------------------------------------------\n");

	while (fabs(s) > 1.E-4) {

		ctr1++;

		// Sweep c in direction s while the SURE-estimated MSE improves.
		while ( (c < 0.999) && (c > 0.001) && ( (ctr2 <= 1) || (mse < old_mse))) {

			ctr2++;

			md_clear(5, W_dims, ip, CFL_SIZE);
			md_clear(5, im_dims, proj, CFL_SIZE);
			md_clear(5, div_dims, div, CFL_SIZE);
			md_clear(5, evec_dims, M, CFL_SIZE);
			md_clear(5, evec_dims, LM, CFL_SIZE);
			md_clear(5, calreg_dims, TC, CFL_SIZE);

			md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

			old_mse = mse;
			mse = 0.;

			// Hard-crop the candidate maps at threshold c.
			crop_weight(evec_dims, M, crop_thresh_function, c, W);

			// Coil-combine: ip = <im, M> per position/map.
			md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M);

			// Projection.
			md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M);

			fftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Low res proj img. (crop to the calibration window and back)
			md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE);
			md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE);

			ifftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Data-fit part of the SURE estimate.
			for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++)
				mse += powf(cabsf(im[jdx] - proj[jdx]), 2.);

			fftuc(5, evec_dims, FFT_FLAGS, LM, M);

			// low-res maps .
			md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE);
			md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE);

			ifftuc(5, evec_dims, FFT_FLAGS, LM, LM);

			md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM);

			// Calc SURE div using low res maps.
			mse += 2. * var * crealf(*div);

			if (ctr2 == 1)
				debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse);
			else
				debug_printf(DP_INFO, "| | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse);

			c = c + s;
		}

		// Step back past the minimum, then refine with half the step in
		// the opposite direction.
		c -= s;
		ctr2 = 0;
		s = -s / 2;
		c += s;
	}

	c = c + s;

	debug_printf(DP_INFO, "---------------------------------------------\n");

	md_free(im);
	md_free(TC);
	md_free(CM);
	md_free(M);
	md_free(LM);
	md_free(W);
	md_free(ip);
	md_free(proj);
	md_free(div);

	debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c);

	return c;
}

/* First calibration step: compute the null-space kernels from the
 * calibration data and reduce them to the image-domain covariance
 * (Gram) matrix imgcov.
 *
 * cov_dims - dimensions of imgcov (spatial dims x packed triangular size).
 * SN/svals - out: number of / buffer for singular values.
 */
void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

#if 1
	long nskerns_dims[5];
	complex float* nskerns;

	compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data);
#else
	// Disabled alternative: SPIRiT-style kernel computation.
	long channels = calreg_dims[3];

	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	long nskerns_dims[5] = { kx, ky, kz, channels, 0 };
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);
	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);

	long nr_kernels = channels;
	nskerns_dims[4] = channels;
spirit_kernel(nskerns_dims, nskerns, calreg_dims, data);
#endif

	compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns);

	md_free(nskerns);
}

/* calculate point-wise maps
 *
 * Voxel-wise eigendecomposition of the image-domain covariance matrices:
 * for every (masked-in) voxel, unpack the packed-triangular covariance,
 * decompose it, and write the top `maps` eigenvectors (sensitivities)
 * into optr and the corresponding eigenvalues into eptr (if non-NULL).
 *
 * out_dims - (x, y, z, channels, maps)
 * imgcov2  - packed lower-triangular covariance per voxel.
 * msk      - optional spatial mask; NULL means all voxels.
 * orthiter - use subspace iteration (top `maps` only) instead of LAPACK.
 */
void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu)
{
#ifdef USE_CUDA
	if (ecal_usegpu) {

		//FIXME cuda version should be able to return sensitivities for a subset of image-space points
		assert(!msk);
		eigenmapscu(out_dims, optr, eptr, imgcov2);
		return;
	}
#else
	assert(!ecal_usegpu);
#endif

	long channels = out_dims[3];
	long maps = out_dims[4];

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(maps <= channels);

	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	float scale = 1.; // for some reason, not
	// NOTE(review): the comment above is truncated; scale is a no-op
	// (divide by 1.) as written — TODO recover the original rationale.

	if (msk_dims) {

		assert(msk_dims[0] == xx);
		assert(msk_dims[1] == yy);
		assert(msk_dims[2] == zz);
	}

	md_clear(5, out_dims, optr, CFL_SIZE);

#pragma omp parallel for collapse(3)
	for (long k = 0; k < zz; k++) {
		for (long j = 0; j < yy; j++) {
			for (long i = 0; i < xx; i++) {

				if (!msk || msk[i + xx * (j + yy * k)]) {

					// Per-voxel work buffers (VLAs).
					float val[channels];
					complex float cov[channels][channels];
					complex float tmp[channels * (channels + 1) / 2];

					// Gather the packed triangular covariance for this voxel.
					for (long l = 0; l < channels * (channels + 1) / 2; l++)
						tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale;

					unpack_tri_matrix(channels, cov, tmp);

					if (orthiter)
						eigen_herm3(maps, channels, val, cov);
					else
						lapack_eig(channels, val, cov);

					// Copy out eigenpairs in descending order; the two
					// solvers store results with different layouts/order.
					for (long u = 0; u < maps; u++) {

						long ru = (orthiter ? maps : channels) - 1 - u;

						for (long v = 0; v < channels; v++)
							optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v];

						if (NULL != eptr)
							eptr[((u * zz + k) * yy + j) * xx + i] = val[ru];
					}
				}
			}
		}
	}
}

/* Second calibration step: interpolate the covariance matrices to full
 * image resolution (sinc zero-padding) and run the voxel-wise
 * eigendecomposition to obtain sensitivity maps.
 *
 * out_dims - (x, y, z, channels, maps) of the output maps.
 * in_dims  - (xh, yh, zh, cosize) of the low-resolution covariance.
 */
void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk)
{
	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	long xh = in_dims[0];
	long yh = in_dims[1];
	long zh = in_dims[2];

	long channels = out_dims[3];
	long cosize = channels * (channels + 1) / 2;	// packed triangular size

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(in_dims[3] == cosize);

	long cov_dims[4] = { xh, yh, zh, cosize };
	long covbig_dims[4] = { xx, yy, zz, cosize };

	// Output must be at least as large as the input grid (or both trivial).
	assert(((xx == 1) && (xh == 1)) || (xx >= xh));
	assert(((yy == 1) && (yh == 1)) || (yy >= yh));
	assert(((zz == 1) && (zh == 1)) || (zz >= zh));

	// Non-trivial input dims must be even (required by centered resize).
	assert((1 == xh) || (0 == xh % 2));
	assert((1 == yh) || (0 == yh % 2));
	assert((1 == zh) || (0 == zh % 2));

	complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE);

	debug_printf(DP_DEBUG1, "Resize...\n");

	sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data);

	debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n");

	eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu);

	md_free(imgcov2);
}

/* Compute the dimensions of the covariance array produced by calone():
 * twice the kernel size in each non-trivial spatial dimension, and the
 * packed triangular size in dimension 3. */
void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels)
{
	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	cov_dims[0] = (1 == kx) ? 1 : (2 * kx);
	cov_dims[1] = (1 == ky) ? 1 : (2 * ky);
	cov_dims[2] = (1 == kz) ?
1 : (2 * kz);
	cov_dims[3] = channels * (channels + 1) / 2;
}

// Default ESPIRiT configuration (positional initializer; field meanings
// follow the declaration of struct ecalib_conf in calib.h — kernel dims
// { 6, 6, 6 }, threshold 0.001, crop 0.8, etc. TODO: use designators).
const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false};

/* Full ESPIRiT calibration: kernel estimation (calone), voxel-wise
 * eigendecomposition (caltwo), optional intensity normalization,
 * cropping (fixed or SURE-based threshold), and phase fixing.
 *
 * out_dims - (x, y, z, channels, maps); eptr receives the eigenvalue maps.
 * SN/svals - number of / buffer for singular values (diagnostics).
 * msk      - optional spatial mask (NULL = all voxels).
 */
void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk)
{
	long channels = calreg_dims[3];
	long maps = out_dims[4];

	assert(calreg_dims[3] == out_dims[3]);
	assert(maps <= channels);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

	complex float rot[channels][channels];

	if (conf->rotphase) {

		// rotate the phase with respect to the first principal component
		long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1);
		scc_dims[COIL_DIM] = channels;
		scc_dims[MAPS_DIM] = channels;
		scc(scc_dims, &rot[0][0], calreg_dims, data);

	} else {

		// Identity rotation: leave phases untouched.
		for (unsigned int i = 0; i < channels; i++)
			for (unsigned int j = 0; j < channels; j++)
				rot[i][j] = (i == j) ? 1. : 0.;
	}

	long cov_dims[4];
	calone_dims(conf, cov_dims, channels);

	complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE);

	calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data);

	caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk);

	/* Intensity and phase normalization similar as proposed
	 * for adaptive combine (Walsh's method) in
	 * Griswold et al., ISMRM 10:2410 (2002)
	 */

	if (conf->intensity) {

		debug_printf(DP_DEBUG1, "Normalize...\n");

		/* I think the reason this works is because inhomogeneity usually
		 * comes from only a few coil elements which are close. The l1-norm
		 * is more resilient against such outliers. -- Martin
		 */

		normalizel1(DIMS, COIL_FLAG, out_dims, out_data);
		md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels));
	}

	// Crop threshold: user-supplied, or determined automatically via SURE.
	float c = (conf->crop >= 0.) ? conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data);

	debug_printf(DP_DEBUG1, "Crop maps... (c = %.2f)\n", c);

	crop_sens(out_dims, out_data, conf->softcrop, c, eptr);

	debug_printf(DP_DEBUG1, "Fix phase...\n");

	fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data);

	md_free(imgcov);
}

/* Convenience wrapper: calib2 without a spatial mask. */
void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, unsigned int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL);
}

/* Add Gaussian noise of (column-)norm amt to each of the dims[1] vectors of
 * length dims[0] in vecs, then renormalize each vector to unit norm.
 * Used to perturb the computed kernels (conf->perturb). */
static void perturb(const long dims[2], complex float* vecs, float amt)
{
	complex float* noise = md_alloc(2, dims, CFL_SIZE);

	md_gaussian_rand(2, dims, noise);

	// Scale each noise column to norm amt.
	for (long j = 0; j < dims[1]; j++) {

		float nrm = md_znorm(1, dims, noise + j * dims[0]);
		complex float val = amt / nrm;
		md_zsmul(1, dims, noise + j * dims[0], noise + j * dims[0], val);
	}

	md_zadd(2, dims, vecs, vecs, noise);

	// Renormalize the perturbed columns.
	for (long j = 0; j < dims[1]; j++) {

		float nrm = md_znorm(1, dims, vecs + j * dims[0]);
		complex float val = 1 / nrm;
		md_zsmul(1, dims, vecs + j * dims[0], vecs + j * dims[0], val);
	}

	md_free(noise);
}

/* Decide how many kernels to keep from N singular values, using exactly one
 * of three mutually exclusive criteria from conf: a fixed count (numsv), a
 * percentage (percentsv), or a relative threshold on val[i]/val[0]. */
static int number_of_kernels(const struct ecalib_conf* conf, unsigned int N, const float val[N])
{
	unsigned int n = 0;

	if (-1 != conf->numsv) {

		n = conf->numsv;
		assert(-1. == conf->percentsv);
		assert(-1. == conf->threshold);

	} else if (conf->percentsv != -1.) {

		n = (unsigned int)(N * conf->percentsv / 100.);
		assert(-1 == conf->numsv);
		assert(-1. == conf->threshold);

	} else {

		assert(-1 == conf->numsv);
		assert(-1. == conf->percentsv);

		for (unsigned int i = 0; i < N; i++) {

			if (val[i] / val[0] > sqrtf(conf->threshold))
				n++;
		}
	}

	if (val[0] <= 0.)
		error("No signal.\n");

	// NOTE(review): %ld is used for N (unsigned int) — format/argument
	// mismatch; should be %u. Left as-is in this documentation pass.
	debug_printf(DP_DEBUG1, "Using %d/%ld kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ?
", weighted" : ""); float tr = 0.; for (unsigned int i = 0; i < N; i++) { tr += powf(val[i], 2.); debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]); } debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N); assert(n <= N); return n; } void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, unsigned int SN, float val[SN], const long caldims[DIMS], const complex float* caldata) { assert(1 == md_calc_size(DIMS - 5, caldims + 5)); nskerns_dims[0] = conf->kdims[0]; nskerns_dims[1] = conf->kdims[1]; nskerns_dims[2] = conf->kdims[2]; nskerns_dims[3] = caldims[3]; long N = md_calc_size(4, nskerns_dims); assert(N > 0); nskerns_dims[4] = N; complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE); *nskerns_ptr = nskerns; PTR_ALLOC(complex float[N][N], vec); assert(NULL != val); assert(SN == N); debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n"); #ifdef CALMAT_SVD calmat_svd(conf->kdims, N, *vec, val, caldims, caldata); if (conf->weighting) soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.); #else nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.); #endif #else covariance_function(conf->kdims, N, *vec, caldims, caldata); debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N); // we could apply Nystroem method here to speed it up float tmp_val[N]; lapack_eig(N, tmp_val, *vec); // reverse and square root, test for smaller null to avoid NaNs for (int i = 0; i < N; i++) val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]); if (conf->weighting) soft_weight_singular_vectors(N, conf-> var, conf->kdims, caldims, val, val); for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) #ifndef FLIP nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? 
val[i] : 1.); // flip #else nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip #endif #endif if (conf->perturb > 0.) { long dims[2] = { N, N }; perturb(dims, nskerns, conf->perturb); } #ifndef FLIP nskerns_dims[4] = number_of_kernels(conf, N, val); #else nskerns_dims[4] = N - number_of_kernels(conf, N, val); #endif PTR_FREE(vec); } void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns) { debug_printf(DP_DEBUG1, "Zeropad...\n"); long xh = cov_dims[0]; long yh = cov_dims[1]; long zh = cov_dims[2]; long kx = nskerns_dims[0]; long ky = nskerns_dims[1]; long kz = nskerns_dims[2]; long channels = nskerns_dims[3]; long nr_kernels = nskerns_dims[4]; long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels }; complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE); complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE); md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE); // resort array debug_printf(DP_DEBUG1, "FFT (juggling)...\n"); long istr[5]; long mstr[5]; long idim[5] = { xh, yh, zh, channels, nr_kernels }; long mdim[5] = { nr_kernels, channels, xh, yh, zh }; md_calc_strides(5, istr, idim, CFL_SIZE); md_calc_strides(5, mstr, mdim, CFL_SIZE); long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] }; ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1); ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1); float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling md_free(imgkern1); debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n"); int cosize = channels * (channels + 1) / 2; assert(cov_dims[3] == cosize); #pragma omp parallel for collapse(3) for (int k = 0; k < zh; k++) { for (int j = 0; j < yh; j++) { for (int i = 0; i < xh; i++) { complex float gram[cosize]; gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * 
(channels * nr_kernels))); #ifdef FLIP // add (scaled) identity matrix for (int i = 0, l = 0; i < channels; i++) for (int j = 0; j <= i; j++, l++) gram[l] = ((i == j) ? (kx * ky * kz) : 0.) - gram[l]; #endif for (int l = 0; l < cosize; l++) imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq; } } } md_free(imgkern2); }
syr2k.dstblock-timetile.c
/**
 * This version is stamped on May 10, 2016
 *
 * Contact:
 * Louis-Noel Pouchet <pouchet.ohio-state.edu>
 * Tomofumi Yuki <tomofumi.yuki.fr>
 *
 * Web address: http://polybench.sourceforge.net
 */
/* syr2k.c: this file is part of PolyBench/C */

#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
#include "syr2k.h"

/* Array initialization.
 * Fills A (n x m), B (n x m) and C (n x n) with deterministic values in
 * [0, 1) derived from the indices, and sets the scalars alpha and beta. */
static void init_array(int n, int m, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m))
{
  int i, j;

  *alpha = 1.5;
  *beta = 1.2;
  for (i = 0; i < n; i++)
    for (j = 0; j < m; j++) {
      A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n;
      B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m;
    }
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m;
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Dumps C to POLYBENCH_DUMP_TARGET, 20 values per line. */
static void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n))
{
  int i, j;

  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("C");
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
      fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]);
    }
  POLYBENCH_DUMP_END("C");
  POLYBENCH_DUMP_FINISH;
}

/* Main computational kernel. The whole function will be timed, including the call and return.
*/ static void kernel_syr2k(int n, int m, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m), DATA_TYPE POLYBENCH_2D(At,M,N,m,n), DATA_TYPE POLYBENCH_2D(Bt,M,N,m,n)) { /* Sizes * the linesize is 64 bytes on keller and blum * on keller, L1,L2,L3 is 32 KB, 256 KB, 20480 KB * on blum, 32KB , 256 KB, 6 MB * 64 bits per word; each word is 8 bytes */ // Indices int i, j, k; int ii, jj, kk; // PARAMETER 1 // make sure you change the data size when you change this too!!! int cacheSize = 256; // IN kilobytes !!! // PARAMETER 2 int jumpA = floor((cacheSize * 1024) / (4*8*8)); //int jumpB = floor((cacheSize * 1024 ) / (18*8*4) ); int jumpB = 64; int jump = jumpA; // Misc. Calculations int linesize = 8; // how many bytes per cache line? int blockcount = cacheSize * 1024 / linesize; // kb * (bytes / per kb) / (bytes / per cache line) //BLAS PARAMS //UPLO = 'L' //TRANS = 'N' //A is NxM //At is MxN //B is NxM //Bt is MxN //C is NxN #pragma scop // Note: I can't figure out how to // stack allocate the array; I do this beforehand // and it's untimed. 
#pragma omp parallel for private(ii,jj,i,j) for (ii=0; ii < _PB_N; ii += jump){ for(jj=0; jj < _PB_M; jj += jump){ for (i=ii; i < fmin(jump + ii, _PB_N); i++){ for (j=jj; j < fmin(jump + jj, _PB_M); j++){ // Transpose At[j][i] = A[i][j]; Bt[j][i] = B[i][j]; } } } } // At is M by N // Bt is M by N #pragma omp parallel for private(ii,jj,i,j) for (ii=0; ii < _PB_N; ii += jumpB){ for(jj=0; jj <= i; jj += jumpB){ for (i=ii; i < fmin(jumpB + ii, _PB_N); i++){ for (j=jj; j <= fmin(jumpB + jj, i); j++){ C[i][j] *= beta; } } } } #pragma omp parallel for private(i,kk,k,j) for (i = 0; i < _PB_N; i++) { for(kk = 0; kk < _PB_M; kk += jumpB){ for(k = kk; k < fmin(_PB_M, kk+jumpB); k++){ #pragma omp parallel for private(j) for (j = 0; j <= i; j++){ C[i][j] += At[k][j]*alpha*B[i][k] + Bt[k][j]*alpha*A[i][k]; } } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code #ifdef POLYBENCH_GFLOPS polybench_set_program_flops(FP_ops); // HAVERFORD addition #endif #if defined POLYFORD_VERBOSE printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n", __FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0); #endif /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(At,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(Bt,DATA_TYPE,M,N,m,n); /* Initialize array(s). */ init_array (n, m, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syr2k (n, m, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(At), POLYBENCH_ARRAY(Bt)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
Booster.h
/*! -*-c++-*-
  @file Booster.h
  @author David Hirvonen
  @brief Internal declaration of the XGBoost C++ interface class.

  \copyright Copyright 2014-2016 Elucideye, Inc. All rights reserved.
  \license{This project is released under the 3 Clause BSD License.}
*/

#ifndef __drishti_ml_Booster_h__
#define __drishti_ml_Booster_h__

// implementations in ctypes
#define _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_DEPRECATE

#include <cstdio>
#include <vector>
#include <string>
#include <cstring>
#include <cmath>
#include <algorithm>

// include all std functions
// NOTE(review): using-directives at header scope leak into every includer;
// left as-is here since code below may rely on unqualified std names.
using namespace std;

#include "xgboost/wrapper/xgboost_wrapper.h"
#include "xgboost/src/data.h"
#include "xgboost/src/io/io.h"
#include "xgboost/src/io/simple_dmatrix-inl.hpp" // DMatrixSimple
#include "xgboost/src/utils/utils.h" // CheckNaN
#include "xgboost/src/learner/learner-inl.hpp"
#include "xgboost/src/utils/math.h"
#include "xgboost/src/utils/group_data.h"
#include "xgboost/src/gbm/gblinear-inl.hpp"
#include "xgboost/src/gbm/gbtree-inl.hpp"
#include "xgboost/src/learner/objective-inl.hpp"

#include "drishti/core/Logger.h"

#include <random>
#include <iostream>

using namespace xgboost;
using namespace xgboost::io;

// Use this definition with custom boost serialization XGBoost lib,
// else simply wrap standard XGBoost serialization with a boost API.
// Note: Setting this to 1 (if possible) will significantly reduce
// model size requirements
#define USE_XGBOOST_WITH_CEREAL 1

DRISHTI_BEGIN_NAMESPACE(xgboost)

// Build a sparse DMatrixSimple from a dense row-major float buffer.
// Entries equal to `missing` (or NaN, when missing is NaN) are dropped.
// Throws (via utils::Check) if the data contains NaN but missing != NaN.
inline std::shared_ptr<DMatrixSimple> DMatrixSimpleFromMat(const float* data, bst_ulong nrow, bst_ulong ncol, float missing)
{
    bool nan_missing = utils::CheckNAN(missing);

    std::shared_ptr<DMatrixSimple> p_mat = std::make_shared<DMatrixSimple>();
    DMatrixSimple& mat = *p_mat;
    mat.info.info.num_row = nrow;
    mat.info.info.num_col = ncol;
    for (bst_ulong i = 0; i < nrow; ++i, data += ncol) // advance to next row
    {
        bst_ulong nelem = 0; // non-missing entries in this row
        for (bst_ulong j = 0; j < ncol; ++j)
        {
            if (utils::CheckNAN(data[j]))
            {
                utils::Check(nan_missing, "There are NAN in the matrix, however, you did not set missing=NAN");
            }
            else
            {
                if (nan_missing || data[j] != missing)
                {
                    mat.row_data_.emplace_back(bst_uint(j), data[j]);
                    ++nelem;
                }
            }
        }
        // CSR row pointer: cumulative count of stored entries.
        mat.row_ptr_.push_back(mat.row_ptr_.back() + nelem);
    }
    return p_mat;
}

// Overload for a 2-D MatrixType<float>; same missing-value semantics as
// the dense-pointer overload above. Stubbed out in minimum-size builds.
inline std::shared_ptr<DMatrixSimple> DMatrixSimpleFromMat(const MatrixType<float>& data, bst_ulong nrow, bst_ulong ncol, float missing)
{
#if DRISHTI_BUILD_MIN_SIZE
    assert(false);
    return std::shared_ptr<DMatrixSimple>();
#else
    bool nan_missing = utils::CheckNAN(missing);
    std::shared_ptr<DMatrixSimple> p_mat = std::make_shared<DMatrixSimple>();
    DMatrixSimple& mat = *p_mat;
    mat.info.info.num_row = nrow;
    mat.info.info.num_col = ncol;
    for (bst_ulong i = 0; i < nrow; ++i)
    {
        bst_ulong nelem = 0;
        for (bst_ulong j = 0; j < ncol; ++j)
        {
            if (utils::CheckNAN(data[i][j]))
            {
                utils::Check(nan_missing, "There are NAN in the matrix, however, you did not set missing=NAN");
            }
            else
            {
                if (nan_missing || data[i][j] != missing)
                {
                    mat.row_data_.push_back(RowBatch::Entry(bst_uint(j), data[i][j]));
                    ++nelem;
                }
            }
        }
        mat.row_ptr_.push_back(mat.row_ptr_.back() + nelem);
    }
    return p_mat;
#endif
}

// Overload taking an explicit element mask: entry (i, j) is stored iff the
// mask is empty or mask[i][j] is non-zero (no NaN/missing handling here).
inline std::shared_ptr<DMatrixSimple> DMatrixSimpleFromMat(const MatrixType<float>& data, bst_ulong nrow, bst_ulong ncol, const MatrixType<uint8_t>& mask)
{
    std::shared_ptr<DMatrixSimple> p_mat = std::make_shared<DMatrixSimple>();
    DMatrixSimple& mat = *p_mat;
    mat.info.info.num_row = nrow;
    mat.info.info.num_col = ncol;
    for (bst_ulong i = 0; i < nrow; ++i)
    {
        bst_ulong nelem = 0;
        for (bst_ulong j = 0; j < ncol; ++j)
        {
            if (!mask.size() || mask[i][j])
            {
                mat.row_data_.emplace_back(bst_uint(j), data[i][j]);
                ++nelem;
            }
        }
        mat.row_ptr_.push_back(mat.row_ptr_.back() + nelem);
    }
    return p_mat;
}

DRISHTI_BEGIN_NAMESPACE(wrapper)

// booster wrapper class
// Thin wrapper over xgboost::learner::BoostLearner adding lazy model
// initialization, raw-buffer (de)serialization, and an archive-based
// serialize() for use with this project's serialization layer.
class Booster : public learner::BoostLearner
{
public:
    // Optionally pre-register training matrices as prediction caches.
    explicit Booster(const std::vector<DataMatrix*>& mats = {})
    {
        this->silent = 1;
        this->init_model = false; // model is created lazily (CheckInitModel)
        if (mats.size())
        {
            this->SetCacheData(mats);
        }
    }

    // Predict on dmat; returns a pointer into an internal buffer (valid
    // until the next call) and stores its length in *len.
    // option_mask bit 0 and bit 1 toggle prediction modes passed through
    // to BoostLearner::Predict (margin / leaf output — TODO confirm).
    inline const float* Pred(const DataMatrix& dmat, int option_mask, unsigned ntree_limit, bst_ulong* len)
    {
#if DRISHTI_BUILD_MIN_SIZE
        assert(false);
        return nullptr;
#else
        this->CheckInitModel();
        this->Predict(dmat, (option_mask & 1) != 0, &this->preds_, ntree_limit, (option_mask & 2) != 0);
        *len = static_cast<bst_ulong>(this->preds_.size());
        return BeginPtr(this->preds_);
#endif
    }

    // One boosting iteration with externally supplied gradients/hessians
    // (len = number of training instances).
    inline void BoostOneIter(const DataMatrix& train, float* grad, float* hess, bst_ulong len)
    {
#if DRISHTI_BUILD_MIN_SIZE
        assert(false);
#else
        this->gpair_.resize(len);
        const bst_omp_uint ndata = static_cast<bst_omp_uint>(len);
#pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j)
        {
            gpair_[j] = bst_gpair(grad[j], hess[j]);
        }
        gbm_->DoBoost(train.fmat(), this->FindBufferOffset(train), train.info.info, &gpair_);
#endif
    }

    // Lazily initialize the underlying model exactly once.
    inline void CheckInitModel()
    {
        if (!init_model)
        {
            this->InitModel();
            init_model = true;
        }
    }

    // Load a model from file; marks the model initialized.
    inline void LoadModel(const char* fname)
    {
        this->init_model = true;
#if DRISHTI_BUILD_MIN_SIZE
        assert(false);
#else
        learner::BoostLearner::LoadModel(fname);
#endif
    }

    // Load a model from an in-memory buffer; marks the model initialized.
    // (The const_cast-style (void*) cast is required by the non-const
    // MemoryFixSizeBuffer API; the buffer is only read.)
    inline void LoadModelFromBuffer(const void* buf, size_t size)
    {
        this->init_model = true;
#if DRISHTI_BUILD_MIN_SIZE
        assert(false);
#else
        utils::MemoryFixSizeBuffer fs((void*)buf, size);
        learner::BoostLearner::LoadModel(fs, true);
#endif
    }

    // Serialize the model into the internal model_str buffer; returns a
    // pointer to it (NULL when empty) and stores the length in *out_len.
    inline const char* GetModelRaw(bst_ulong* out_len)
    {
#if DRISHTI_BUILD_MIN_SIZE
        assert(false);
        return nullptr;
#else
        this->CheckInitModel();
        model_str.resize(0);
        utils::MemoryBufferStream fs(&model_str);
        learner::BoostLearner::SaveModel(fs, false);
        *out_len = static_cast<bst_ulong>(model_str.length());
        if (*out_len == 0)
        {
            return NULL;
        }
        else
        {
            return &model_str[0];
        }
#endif
    }

    // Archive-based serialization: either delegate directly to the
    // custom-serialization-enabled BoostLearner base, or round-trip
    // through the raw model string. `version` is unused.
    template <class Archive>
    void serialize(Archive& ar, const unsigned int version)
    {
#if USE_XGBOOST_WITH_CEREAL
        auto& parent = dynamic_cast<xgboost::learner::BoostLearner&>(*this);
        ar& parent;
#else
        if (Archive::is_loading::value)
        {
            ar& model_str;
            LoadModelFromBuffer(&model_str[0], model_str.size());
        }
        else
        {
            bst_ulong length = 0;
            GetModelRaw(&length); // uses internal model_str
            ar& model_str;
        }
#endif
    }

    // Install a logger for this wrapper.
    void setStreamLogger(std::shared_ptr<spdlog::logger>& logger)
    {
        m_streamLogger = logger;
    }

    // temporal data to save model dump
    std::string model_str;

private:
    bool init_model; // true once the model has been created or loaded
    std::shared_ptr<spdlog::logger> m_streamLogger;
};

DRISHTI_END_NAMESPACE(wrapper)
DRISHTI_END_NAMESPACE(xgboost)

#endif // __drishti_ml_Booster_h__
bli_axpyv_opt_var1.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

#define FUNCPTR_T axpyv_fp

// Signature shared by all type-combination instances of this variant:
// y := y + alpha * conjx(x)
typedef void (*FUNCPTR_T)(
                           conj_t conjx,
                           dim_t  n,
                           void*  alpha,
                           void*  x, inc_t incx,
                           void*  y, inc_t incy
                         );

// If some mixed datatype functions will not be compiled, we initialize
// the corresponding elements of the function array to NULL.
#ifdef BLIS_ENABLE_MIXED_PRECISION_SUPPORT
static FUNCPTR_T GENARRAY3_ALL(ftypes,axpyv_opt_var1);
#else
#ifdef BLIS_ENABLE_MIXED_DOMAIN_SUPPORT
static FUNCPTR_T GENARRAY3_EXT(ftypes,axpyv_opt_var1);
#else
static FUNCPTR_T GENARRAY3_MIN(ftypes,axpyv_opt_var1);
#endif
#endif


// Object-based entry point: extracts raw buffers/metadata from the obj_t
// operands and dispatches to the type-specific kernel.
void bli_axpyv_opt_var1( obj_t* alpha,
                         obj_t* x,
                         obj_t* y )
{
	num_t     dt_x      = bli_obj_datatype( *x );
	num_t     dt_y      = bli_obj_datatype( *y );

	conj_t    conjx     = bli_obj_conj_status( *x );

	dim_t     n         = bli_obj_vector_dim( *x );

	inc_t     inc_x     = bli_obj_vector_inc( *x );
	void*     buf_x     = bli_obj_buffer_at_off( *x );

	inc_t     inc_y     = bli_obj_vector_inc( *y );
	void*     buf_y     = bli_obj_buffer_at_off( *y );

	num_t     dt_alpha;
	void*     buf_alpha;

	FUNCPTR_T f;

	// If alpha is a scalar constant, use dt_x to extract the address of the
	// corresponding constant value; otherwise, use the datatype encoded
	// within the alpha object and extract the buffer at the alpha offset.
	bli_set_scalar_dt_buffer( alpha, dt_x, dt_alpha, buf_alpha );

	// Index into the type combination array to extract the correct
	// function pointer.
	f = ftypes[dt_alpha][dt_x][dt_y];

	// Invoke the function.
	f( conjx,
	   n,
	   buf_alpha,
	   buf_x, inc_x,
	   buf_y, inc_y );
}


// Reference (unoptimized) scalar loop, generated per type combination.
#undef  GENTFUNC3
#define GENTFUNC3( ctype_a, ctype_x, ctype_y, cha, chx, chy, opname, varname ) \
\
void PASTEMAC3(cha,chx,chy,varname)( \
                                     conj_t conjx, \
                                     dim_t  n, \
                                     void*  alpha, \
                                     void*  x, inc_t incx, \
                                     void*  y, inc_t incy \
                                   ) \
{ \
	ctype_a* alpha_cast = alpha; \
	ctype_x* x_cast     = x; \
	ctype_y* y_cast     = y; \
	ctype_x* chi1; \
	ctype_y* psi1; \
	dim_t    i; \
\
	if ( bli_zero_dim1( n ) ) return; \
\
	chi1 = x_cast; \
	psi1 = y_cast; \
\
	if ( bli_is_conj( conjx ) ) \
	{ \
		for ( i = 0; i < n; ++i ) \
		{ \
			PASTEMAC3(cha,chx,chy,axpyjs)( *alpha_cast, *chi1, *psi1 ); \
\
			chi1 += incx; \
			psi1 += incy; \
		} \
	} \
	else \
	{ \
		for ( i = 0; i < n; ++i ) \
		{ \
			PASTEMAC3(cha,chx,chy,axpys)( *alpha_cast, *chi1, *psi1 ); \
\
			chi1 += incx; \
			psi1 += incy; \
		} \
	} \
}

// Define the basic set of functions unconditionally, and then also some
// mixed datatype functions if requested.
//INSERT_GENTFUNC3_BASIC( axpyv, axpyv_opt_var1 )
GENTFUNC3( float,    float,    float,    s, s, s, axpyv, axpyv_opt_var1 )
//GENTFUNC3( double,   double,   double,   d, d, d, axpyv, axpyv_opt_var1 )
GENTFUNC3( scomplex, scomplex, scomplex, c, c, c, axpyv, axpyv_opt_var1 )
GENTFUNC3( dcomplex, dcomplex, dcomplex, z, z, z, axpyv, axpyv_opt_var1 )

#ifdef BLIS_ENABLE_MIXED_DOMAIN_SUPPORT
INSERT_GENTFUNC3_MIX_D( axpyv, axpyv_opt_var1 )
#endif

#ifdef BLIS_ENABLE_MIXED_PRECISION_SUPPORT
INSERT_GENTFUNC3_MIX_P( axpyv, axpyv_opt_var1 )
#endif


// Hand-optimized double-precision kernel using QPX (Blue Gene/Q) 4-wide
// vector intrinsics: y := y + alpha * x, processed 4 doubles at a time.
void bli_dddaxpyv_opt_var1(
                            conj_t conjx,
                            dim_t  n,
                            void*  alpha_in,
                            void*  x_in, inc_t incx,
                            void*  y_in, inc_t incy
                          )
{
	double* restrict alpha = alpha_in;
	double* restrict x     = x_in;
	double* restrict y     = y_in;

	if ( bli_zero_dim1( n ) ) return;

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	bool_t use_ref = FALSE;

	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( x, 32 ) ||
	     bli_is_unaligned_to( y, 32 ) )
	{
		use_ref = TRUE;
	}

	// Call the reference implementation if needed.
	if ( use_ref == TRUE )
	{
		// FIX: terminate the diagnostic with a newline so it is not lost
		// in the stdio buffer.
		printf("Defaulting to reference!\n");
		bli_dddaxpyv_unb_var1( conjx, n, alpha, x, incx, y, incy );
		return;
	}

	dim_t n_run  = n / 4;
	dim_t n_left = n % 4;

	// alphav is read-only, so sharing it across threads is safe.
	vector4double alphav = vec_lds( 0 * sizeof(double), alpha );

	#pragma omp parallel for
	for ( dim_t i = 0; i < n_run; i++ )
	{
		// FIX: xv/yv/zv were previously declared outside the parallel loop,
		// making them shared across all OpenMP threads — a data race that
		// could corrupt results.  Declaring them inside the loop body makes
		// them private to each iteration/thread.
		vector4double xv, yv, zv;

		xv = vec_lda( 0 * sizeof(double), &x[i*4] );
		yv = vec_lda( 0 * sizeof(double), &y[i*4] );

		zv = vec_madd( alphav, xv, yv );

		vec_sta( zv, 0 * sizeof(double), &y[i*4] );
	}

	// Scalar cleanup for the remaining (< 4) elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		y[4*n_run + i] += *alpha * x[4*n_run + i];
	}
}
DataTypeConversions.h
// // Created by raver119 on 21.11.17. // #ifndef LIBND4J_DATATYPECONVERSIONS_H #define LIBND4J_DATATYPECONVERSIONS_H #include <pointercast.h> #include <helpers/logger.h> #include <op_boilerplate.h> #include <array/DataType.h> #include <types/float16.h> #include <helpers/BitwiseUtils.h> namespace nd4j { template <typename T> class DataTypeConversions { public: static FORCEINLINE void convertType(T* buffer, void* src, DataType dataType, ByteOrder order, Nd4jIndex length) { bool isBe = BitwiseUtils::isBE(); bool canKeep = (isBe && order == ByteOrder::BE) || (!isBe && order == ByteOrder::LE); switch (dataType) { case DataType_FLOAT: { auto tmp = (float *) src; #pragma omp parallel for simd schedule(guided) for (Nd4jIndex e = 0; e < length; e++) { buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]); } } break; case DataType_DOUBLE: { auto tmp = (double *) src; #pragma omp parallel for simd schedule(guided) for (Nd4jIndex e = 0; e < length; e++) buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]); } break; case DataType_HALF: { auto tmp = (float16 *) src; #pragma omp parallel for simd schedule(guided) for (Nd4jIndex e = 0; e < length; e++) buffer[e] = canKeep ? (T) tmp[e] : BitwiseUtils::swap_bytes<T>((T) tmp[e]); } break; default: { nd4j_printf("Unsupported DataType requested: [%i]\n", (int) dataType); throw "Unsupported DataType"; } } } }; } #endif //LIBND4J_DATATYPECONVERSIONS_H
abs.h
#pragma once #include <vector> #include <unordered_map> #include <algorithm> #include <cmath> #include <omp.h> #include "_cuda.h" using std::vector; using std::unordered_map; using std::max; using std::abs; // ABS // --- template <class T> void abs(T *a, int N) { for (int i=0; i<N; i++) a[i] = abs(a[i]); } template <class T> void abs(vector<T>& a) { abs(a.begin(), a.end()); } template <class K, class T> void abs(unordered_map<K, T>& a) { for (auto& p : a) p.second = abs(p.second); } // ABS-AT // ------ template <class T, class I> void absAt(T *a, I&& is) { for (int i : is) a[i] = abs(a[i]); } template <class T, class I> void absAt(vector<T>& a, I&& is) { absAt(a.data(), is); } template <class K, class T, class I> void absAt(unordered_map<K, T>& a, I&& ks) { for (auto&& k : ks) a[k] = abs(a[k]); } // ABS (OMP) // --------- template <class T> void absOmp(T *a, int N) { #pragma omp parallel for for (int i=0; i<N; i++) a[i] = abs(a[i]); } template <class T> void fillOmp(vector<T>& a) { absOmp(a.data(), a.size()); } // ABS (CUDA) // ---------- template <class T> __device__ void absKernelLoop(T *a, int N, int i, int DI) { for (; i<N; i+=DI) a[i] = abs(a[i]); } template <class T> __global__ void absKernel(T *a, int N) { DEFINE(t, b, B, G); absKernelLoop(a, N, B*b+t, G*B); } template <class T> void absCuda(T *a, int N) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); T *aD; TRY( cudaMalloc(&aD, N1) ); TRY( cudaMemcpy(aD, a, N1, cudaMemcpyHostToDevice) ); absKernel<<<G, B>>>(aD, N); TRY( cudaMemcpy(a, aD, N1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(aD) ); } template <class T> void absCuda(vector<T>& a) { absCuda(a.data(), a.size()); }
fntext_bi.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <assert.h>
#include <omp.h>

/* Uniform init range for embedding weights: [-EM_RANGE, EM_RANGE]. */
#define EM_RANGE (0.01)

typedef float floatx;

/* Uni-gram + bi-gram max-pooled text classifier (fastText-like).
 * em/em_bi: uni-/bi-gram embeddings; w/w_bi: softmax weights; b: bias. */
struct model_t {
    floatx *em, *w, *b;
    floatx *em_bi, *w_bi;
    int64_t em_dim, vocab_num, category_num;
};

/* Flattened corpus: token ids, per-text lengths/labels, per-text offsets. */
struct dataset_t {
    int64_t *text_indices, *text_lens, *text_categories;
    int64_t *start_pos;
    int64_t text_num;
};

/* Allocate model buffers; if is_init, seed rand() and randomize weights,
 * otherwise zero everything (used for Adam moment/gradient accumulators).
 * NOTE(review): malloc results are unchecked, as in the original. */
void init_model(struct model_t *model, int64_t em_dim, int64_t vocab_num, int64_t category_num, int64_t is_init)
{
    model->em_dim = em_dim;
    model->vocab_num = vocab_num;
    model->category_num = category_num;
    model->em = (floatx *)malloc(em_dim * vocab_num * sizeof(floatx));
    model->em_bi = (floatx *)malloc(em_dim * vocab_num * sizeof(floatx));
    model->w = (floatx *)malloc(em_dim * category_num * sizeof(floatx));
    model->w_bi = (floatx *)malloc(em_dim * category_num * sizeof(floatx));
    model->b = (floatx *)malloc(category_num * sizeof(floatx));
    floatx *em = model->em;
    floatx *em_bi = model->em_bi;
    floatx *w = model->w;
    floatx *w_bi = model->w_bi;
    floatx *b = model->b;
    int64_t i;
    if (is_init) {
        srand(time(NULL));
        /* embeddings: uniform in [-EM_RANGE, EM_RANGE] */
        for (i = 0; i < em_dim * vocab_num; i++) {
            em[i] = ((floatx)rand() / RAND_MAX) * 2. * EM_RANGE - EM_RANGE;
            em_bi[i] = ((floatx)rand() / RAND_MAX) * 2. * EM_RANGE - EM_RANGE;
        }
        /* classifier: uniform in [-stdv, stdv], stdv = 1/sqrt(2*em_dim)
         * (the concatenated uni+bi feature has 2*em_dim inputs) */
        floatx stdv = 1. / (floatx)sqrt((double)em_dim * 2);
        for (i = 0; i < em_dim * category_num; i++) {
            w[i] = (floatx)rand() / RAND_MAX * 2. * stdv - stdv;
            w_bi[i] = (floatx)rand() / RAND_MAX * 2. * stdv - stdv;
        }
        for (i = 0; i < category_num; i++)
            b[i] = (floatx)rand() / RAND_MAX * 2. * stdv - stdv;
    } else {
        for (i = 0; i < em_dim * vocab_num; i++) {
            em[i] = 0.;
            em_bi[i] = 0.;
        }
        for (i = 0; i < em_dim * category_num; i++) {
            w[i] = 0.;
            w_bi[i] = 0.;
        }
        for (i = 0; i < category_num; i++)
            b[i] = 0.;
    }
}

/* Release all model buffers. */
void free_model(struct model_t *model)
{
    free(model->em);
    free(model->em_bi);
    free(model->w);
    free(model->w_bi);
    free(model->b);
}

/* Peek at the next character without consuming it (returns EOF at end). */
int preread(FILE *fp)
{
    int ch = fgetc(fp);
    if (ch == EOF)
        return ch;
    else {
        fseek(fp, -1, SEEK_CUR);
        return ch;
    }
}

/* Load a "category,idx idx idx\n" file in two passes: first count texts
 * and tokens, then fill the flattened arrays.  Tokens with id >= max_voc
 * are dropped; texts that end up empty are ignored.
 * NOTE(review): "%ld" assumes int64_t == long (LP64) — true on the
 * original target; use PRId64 for portability. */
void load_data(struct dataset_t *data, const char *path, int64_t max_voc)
{
    FILE *fp = NULL;
    fp = fopen(path, "r");
    if (fp == NULL) {
        perror("error");
        exit(EXIT_FAILURE);
    }
    int next_ch;
    int64_t text_num = 0, ch_num = 0, ignore_text_num = 0;
    int64_t text_len = 0;
    int64_t cat, text_i;
    enum state_t { READ_CAT, READ_INDEX } state = READ_CAT;
    /* pass 1: count */
    while (1) {
        int is_break = 0;
        switch (state) {
        case READ_CAT:
            if (fscanf(fp, "%ld,", &cat) > 0) {
                if (preread(fp) == '\n') {
                    /* category with no tokens at all */
                    ignore_text_num++;
                    fgetc(fp);
                } else
                    state = READ_INDEX;
            } else {
                assert(feof(fp));
                is_break = 1;
            }
            break;
        case READ_INDEX: {
            /* FIX: the original read inside assert(fscanf(...) > 0); under
             * NDEBUG the read itself would be compiled away. */
            int nread = fscanf(fp, "%ld", &text_i);
            assert(nread > 0);
            (void)nread;
            if (text_i < max_voc) {
                ch_num++;
                text_len++;
            }
            next_ch = fgetc(fp);
            if (next_ch == '\n') {
                if (text_len == 0) {
                    ignore_text_num++;
                } else {
                    text_num++;
                    text_len = 0;
                }
                state = READ_CAT;
            }
        }
        }
        if (is_break)
            break;
    }
    printf("load data from %s\n", path);
    printf("#lines: %ld, #chs: %ld\n", text_num, ch_num);
    printf("#ignore lines: %ld\n", ignore_text_num);
    data->text_num = text_num;
    data->text_indices = (int64_t *)malloc(ch_num * sizeof(int64_t));
    data->text_lens = (int64_t *)malloc(text_num * sizeof(int64_t));
    data->text_categories = (int64_t *)malloc(text_num * sizeof(int64_t));
    data->start_pos = (int64_t *)malloc(text_num * sizeof(int64_t));
    text_len = 0;
    int64_t *text_indices = data->text_indices;
    int64_t *text_lens = data->text_lens;
    int64_t *text_categories = data->text_categories;
    int64_t *start_pos = data->start_pos;
    rewind(fp);
    /* pass 2: fill (state is READ_CAT again here — pass 1 ends in it) */
    while (1) {
        int is_break = 0;
        switch (state) {
        case READ_CAT:
            if (fscanf(fp, "%ld,", &cat) > 0) {
                if (preread(fp) == '\n') {
                    fgetc(fp);
                } else
                    state = READ_INDEX;
            } else {
                assert(feof(fp));
                is_break = 1;
            }
            break;
        case READ_INDEX: {
            int nread = fscanf(fp, "%ld", &text_i);
            assert(nread > 0);
            (void)nread;
            if (text_i < max_voc) {
                text_len++;
                *text_indices = text_i;
                text_indices++;
            }
            next_ch = fgetc(fp);
            if (next_ch == '\n') {
                state = READ_CAT;
                if (text_len > 0) {
                    *text_lens = text_len;
                    text_lens++;
                    text_len = 0;
                    *text_categories = cat;
                    text_categories++;
                }
            }
        }
        }
        if (is_break)
            break;
    }
    start_pos[0] = 0;
    for (int64_t i = 1; i < text_num; i++)
        start_pos[i] = start_pos[i - 1] + data->text_lens[i - 1];
    fclose(fp);
}

/* Release all dataset buffers. */
void free_data(struct dataset_t *data)
{
    free(data->text_indices);
    free(data->text_lens);
    free(data->text_categories);
    free(data->start_pos);
}

/* Forward pass for one text: uni-gram and bi-gram max-pooling over
 * embeddings, linear layer, softmax.  Records argmax element indices in
 * max_fea_index / max_bi_fea_index so backward() can scatter gradients.
 * Returns the cross-entropy loss. */
floatx forward(struct model_t *model, struct dataset_t *train_data, int64_t text_i,
               floatx *max_fea, int64_t *max_fea_index,
               floatx *max_bi_fea, int64_t *max_bi_fea_index,
               floatx *softmax_fea)
{
    int64_t *text_indices = &(train_data->text_indices[train_data->start_pos[text_i]]);
    int64_t text_len = train_data->text_lens[text_i];
    assert(text_len >= 1);
    int64_t text_category = train_data->text_categories[text_i];
    int64_t i, j;
    int64_t em_pos, em_pos0, em_pos1;
    /* uni-gram max_pool: seed with the first token's embedding */
    em_pos = text_indices[0] * model->em_dim;
    for (i = 0; i < model->em_dim; i++) {
        max_fea[i] = model->em[em_pos + i];
        max_fea_index[i] = em_pos + i;
    }
    for (i = 1; i < text_len; i++) {
        em_pos = text_indices[i] * model->em_dim;
        for (j = 0; j < model->em_dim; j++) {
            max_fea[j] = max_fea[j] > (model->em[em_pos + j]) ? max_fea[j] : (model->em[em_pos + j]);
            max_fea_index[j] = max_fea[j] > (model->em[em_pos + j]) ? max_fea_index[j] : (em_pos + j);
        }
    }
    /* bi-gram max_pool: seed with the first pair
     * (for a length-1 text, the single token is paired with itself) */
    em_pos0 = text_indices[0] * model->em_dim;
    em_pos1 = (text_len > 1) ? (text_indices[1] * model->em_dim) : (text_indices[0] * model->em_dim);
    for (j = 0; j < model->em_dim; j++) {
        max_bi_fea[j] = model->em_bi[em_pos0 + j] + model->em_bi[em_pos1 + j];
        /* FIX: the original wrote max_bi_fea_index[0] and [1] for every j,
         * leaving the rest of the index array uninitialized and corrupting
         * the bi-gram gradient scatter. */
        max_bi_fea_index[2 * j] = em_pos0 + j;
        max_bi_fea_index[2 * j + 1] = em_pos1 + j;
    }
    if (text_len == 1) {
        /* printf("warning: text[id: %ld] length == 1 (bi-gram features need length>1)\n", text_i); */
    }
    for (i = 1; i < text_len - 1; i++) {
        em_pos0 = text_indices[i] * model->em_dim;
        em_pos1 = text_indices[i + 1] * model->em_dim;
        for (j = 0; j < model->em_dim; j++) {
            floatx fea = model->em_bi[em_pos0 + j] + model->em_bi[em_pos1 + j];
            if (max_bi_fea[j] < fea) {
                max_bi_fea[j] = fea;
                max_bi_fea_index[2 * j] = em_pos0 + j;
                max_bi_fea_index[2 * j + 1] = em_pos1 + j;
            }
        }
    }
    /* linear layer */
    for (i = 0; i < model->category_num; i++)
        softmax_fea[i] = model->b[i];
    for (i = 0; i < model->category_num; i++)
        for (j = 0; j < model->em_dim; j++)
            softmax_fea[i] += (max_fea[j] * model->w[i * model->em_dim + j] +
                               max_bi_fea[j] * model->w_bi[i * model->em_dim + j]);
    /* FIX: subtract the max logit before exp for numerical stability;
     * the loss value and the softmax ratios used by backward() are
     * mathematically unchanged. */
    floatx mx = softmax_fea[0];
    for (i = 1; i < model->category_num; i++)
        if (softmax_fea[i] > mx)
            mx = softmax_fea[i];
    floatx loss = 0.;
    floatx tmp = 0.;
    loss -= (softmax_fea[text_category] - mx);
    for (i = 0; i < model->category_num; i++) {
        softmax_fea[i] = (floatx)exp((double)(softmax_fea[i] - mx));
        tmp += softmax_fea[i];
    }
    loss += (floatx)log(tmp);
    return loss;
}

/* Backward pass for one text: cross-entropy gradients w.r.t. b, w, w_bi
 * and the pooled features (grad_em / grad_em_bi are dense in feature
 * space; the caller scatters them using the recorded argmax indices).
 * FIX: grad_em_bi / grad_w_bi were declared float* instead of floatx*
 * (harmless only while floatx == float). */
void backward(struct model_t *model, struct dataset_t *train_data, int64_t text_i,
              floatx *max_fea, floatx *max_fea_bi, floatx *softmax_fea,
              floatx *grad_em, floatx *grad_em_bi,
              floatx *grad_w, floatx *grad_w_bi, floatx *grad_b)
{
    int64_t *text_indices = &(train_data->text_indices[train_data->start_pos[text_i]]);
    int64_t text_len = train_data->text_lens[text_i];
    int64_t text_category = train_data->text_categories[text_i];
    (void)text_indices;
    (void)text_len;
    floatx tmp_sum = 0.;
    int64_t i, j;
    for (i = 0; i < model->category_num; i++)
        tmp_sum += softmax_fea[i];
    /* dL/dlogit = softmax - onehot(category) */
    for (i = 0; i < model->category_num; i++)
        grad_b[i] = softmax_fea[i] / tmp_sum;
    grad_b[text_category] -= 1.;
    for (i = 0; i < model->category_num; i++)
        for (j = 0; j < model->em_dim; j++)
            grad_w[i * model->em_dim + j] = max_fea[j] * grad_b[i];
    /* bi */
    for (i = 0; i < model->category_num; i++)
        for (j = 0; j < model->em_dim; j++)
            grad_w_bi[i * model->em_dim + j] = max_fea_bi[j] * grad_b[i];
    for (j = 0; j < model->em_dim; j++)
        grad_em[j] = 0.;
    for (i = 0; i < model->category_num; i++)
        for (j = 0; j < model->em_dim; j++)
            grad_em[j] += (model->w[i * model->em_dim + j]) * grad_b[i];
    /* bi */
    for (j = 0; j < model->em_dim; j++)
        grad_em_bi[j] = 0.;
    for (i = 0; i < model->category_num; i++)
        for (j = 0; j < model->em_dim; j++)
            grad_em_bi[j] += (model->w_bi[i * model->em_dim + j]) * grad_b[i];
}

/* Run the model over vali_data in batches (forward only, parallel within
 * a batch) and print overall and per-category accuracy.
 * NOTE: the "macro precision" line is actually overall (micro) accuracy;
 * the label is kept for output compatibility. */
void evaluate(struct model_t *model, struct dataset_t *vali_data, int64_t batch_size, int64_t threads_n)
{
    printf("evaluating...\n");
    time_t eva_start, eva_end;
    eva_start = time(NULL);
    floatx *max_feas = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    int64_t *max_fea_indexs = (int64_t *)malloc(model->em_dim * batch_size * sizeof(int64_t));
    floatx *max_bi_feas = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    int64_t *max_bi_fea_indexs = (int64_t *)malloc(2 * model->em_dim * batch_size * sizeof(int64_t));
    floatx *softmax_feas = (floatx *)malloc(model->category_num * batch_size * sizeof(floatx));
    int64_t *pre_labels = (int64_t *)malloc(batch_size * sizeof(int64_t));
    int64_t *real_labels = (int64_t *)malloc(batch_size * sizeof(int64_t));
    /* shared accumulators — only touched serially, after the parallel loop */
    floatx *cat_all = (floatx *)malloc(model->category_num * sizeof(floatx));
    floatx *cat_true = (floatx *)malloc(model->category_num * sizeof(floatx));
    for (int64_t i = 0; i < model->category_num; i++) {
        cat_all[i] = 0.;
        cat_true[i] = 0.;
    }
    for (int64_t batch_i = 0; batch_i < (vali_data->text_num + batch_size - 1) / batch_size; batch_i++) {
        int64_t real_batch_size = (vali_data->text_num - batch_i * batch_size) > batch_size
                                      ? batch_size
                                      : (vali_data->text_num - batch_i * batch_size);
        /* parallel: each batch_j works on disjoint scratch slices */
        #pragma omp parallel for schedule(dynamic) num_threads(threads_n)
        for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++) {
            int64_t text_i = (batch_i)*batch_size + batch_j;
            assert(text_i < vali_data->text_num);
            int64_t text_category = vali_data->text_categories[text_i];
            /* zero-length texts should have been filtered at load time */
            if (vali_data->text_lens[text_i] == 0) {
                printf("error: vali text length can not be zero.[text id: %ld]", text_i);
                exit(-1);
            }
            floatx *max_fea = &max_feas[batch_j * model->em_dim];
            int64_t *max_fea_index = &max_fea_indexs[batch_j * model->em_dim];
            floatx *max_bi_fea = &max_bi_feas[batch_j * model->em_dim];
            int64_t *max_bi_fea_index = &max_bi_fea_indexs[2 * batch_j * model->em_dim];
            floatx *softmax_fea = &softmax_feas[batch_j * model->category_num];
            int64_t *pre_label = &pre_labels[batch_j];
            int64_t *real_label = &real_labels[batch_j];
            *real_label = text_category;
            forward(model, vali_data, text_i, max_fea, max_fea_index, max_bi_fea, max_bi_fea_index, softmax_fea);
            /* argmax over softmax outputs = predicted label */
            *pre_label = 0;
            floatx fea = softmax_fea[0];
            for (int64_t c = 1; c < model->category_num; c++) {
                if (softmax_fea[c] > fea) {
                    *pre_label = c;
                    fea = softmax_fea[c];
                }
            }
        }
        /* serial accumulation into the shared counters */
        for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++) {
            cat_all[real_labels[batch_j]] += 1;
            if (real_labels[batch_j] == pre_labels[batch_j])
                cat_true[real_labels[batch_j]] += 1;
        }
    }
    floatx cat_all_sum = 0.;
    floatx cat_true_sum = 0.;
    for (int64_t k = 0; k < model->category_num; k++) {
        cat_all_sum += cat_all[k];
        cat_true_sum += cat_true[k];
    }
    printf("#samples: %.0f\n", cat_all_sum);
    printf("macro precision: %.5f\n", cat_true_sum / cat_all_sum);
    for (int64_t k = 0; k < model->category_num; k++)
        /* FIX: guard against 0/0 (prints 0 instead of nan for empty categories) */
        printf(" category #%ld precision: %.5f\n", k,
               cat_all[k] > 0 ? cat_true[k] / cat_all[k] : 0.);
    free(max_feas);
    free(max_fea_indexs);
    free(max_bi_feas);
    free(max_bi_fea_indexs);
    free(softmax_feas);
    free(pre_labels);
    free(real_labels);
    free(cat_all);
    free(cat_true);
    eva_end = time(NULL);
    printf(" evaluating time: %lds\n", eva_end - eva_start);
}

/* Mini-batch Adam training.  Forward/backward are parallel per sample;
 * gradient accumulation into gt.* and the sparse embedding updates are
 * serial because those buffers are shared. */
void train_adam(struct model_t *model, struct dataset_t *train_data, struct dataset_t *vali_data,
                int64_t epochs, int64_t batch_size, int64_t threads_n)
{
    printf("start training(Adam)...\n");
    /* omp_lock_t omplock; */
    /* omp_init_lock(&omplock); */
    int64_t tmp, i, sel;
    floatx alpha = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8;
    floatx beta1t = beta1; /* running beta1^t for bias correction */
    floatx beta2t = beta2; /* running beta2^t */
    int64_t *shuffle_index = (int64_t *)malloc(train_data->text_num * sizeof(int64_t));
    struct model_t adam_m, adam_v, gt; /* first/second moments, batch gradient */
    init_model(&adam_m, model->em_dim, model->vocab_num, model->category_num, 0);
    init_model(&adam_v, model->em_dim, model->vocab_num, model->category_num, 0);
    init_model(&gt, model->em_dim, model->vocab_num, model->category_num, 0);
    floatx *grads_em = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    floatx *grads_em_bi = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    floatx *grads_w = (floatx *)malloc(model->em_dim * model->category_num * batch_size * sizeof(floatx));
    floatx *grads_w_bi = (floatx *)malloc(model->em_dim * model->category_num * batch_size * sizeof(floatx));
    floatx *grads_b = (floatx *)malloc(model->category_num * batch_size * sizeof(floatx));
    floatx *max_feas = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    int64_t *max_fea_indexs = (int64_t *)malloc(model->em_dim * batch_size * sizeof(int64_t));
    floatx *max_bi_feas = (floatx *)malloc(model->em_dim * batch_size * sizeof(floatx));
    int64_t *max_bi_fea_indexs = (int64_t *)malloc(2 * model->em_dim * batch_size * sizeof(int64_t));
    floatx *softmax_feas = (floatx *)malloc(model->category_num * batch_size * sizeof(floatx));
    floatx *losses = (floatx *)malloc(batch_size * sizeof(floatx));
    printf("init grad end...\n");
    for (i = 0; i < train_data->text_num; i++)
        shuffle_index[i] = i;
    for (int64_t epoch = 0; epoch < epochs; epoch++) {
        printf("#epoch: %ld\n", epoch);
        floatx s_loss = 0.;
        time_t epoch_start, epoch_end;
        /* clock_t epoch_start, epoch_end; */
        /* Fisher-Yates shuffle of the sample order */
        for (i = 0; i < train_data->text_num; i++) {
            sel = rand() % (train_data->text_num - i) + i;
            tmp = shuffle_index[i];
            shuffle_index[i] = shuffle_index[sel];
            shuffle_index[sel] = tmp;
        }
        epoch_start = time(NULL);
        /* epoch_start = clock(); */
        for (int64_t batch_i = 0; batch_i < (train_data->text_num + batch_size - 1) / batch_size; batch_i++) {
            int64_t real_batch_size = (train_data->text_num - batch_i * batch_size) > batch_size
                                          ? batch_size
                                          : (train_data->text_num - batch_i * batch_size);
            /* parallel: disjoint scratch slices per batch_j */
            #pragma omp parallel for schedule(dynamic) num_threads(threads_n)
            for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++) {
                int64_t text_i = (batch_i)*batch_size + batch_j;
                assert(text_i < train_data->text_num);
                text_i = shuffle_index[text_i];
                /* zero-length texts should have been filtered at load time */
                if (train_data->text_lens[text_i] == 0) {
                    printf("error: training text length can not be zero.[text id: %ld]", text_i);
                    exit(-1);
                }
                floatx *grad_em = &grads_em[batch_j * model->em_dim];
                floatx *grad_em_bi = &grads_em_bi[batch_j * model->em_dim];
                floatx *grad_w = &grads_w[batch_j * model->em_dim * model->category_num];
                floatx *grad_w_bi = &grads_w_bi[batch_j * model->em_dim * model->category_num];
                floatx *grad_b = &grads_b[batch_j * model->category_num];
                floatx *max_fea = &max_feas[batch_j * model->em_dim];
                int64_t *max_fea_index = &max_fea_indexs[batch_j * model->em_dim];
                floatx *max_bi_fea = &max_bi_feas[batch_j * model->em_dim];
                int64_t *max_bi_fea_index = &max_bi_fea_indexs[2 * batch_j * model->em_dim];
                floatx *softmax_fea = &softmax_feas[batch_j * model->category_num];
                losses[batch_j] = forward(model, train_data, text_i, max_fea, max_fea_index,
                                          max_bi_fea, max_bi_fea_index, softmax_fea);
                backward(model, train_data, text_i, max_fea, max_bi_fea, softmax_fea,
                         grad_em, grad_em_bi, grad_w, grad_w_bi, grad_b);
            }
            for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++)
                s_loss += losses[batch_j];
            /* accumulate per-sample gradients into gt.* — serial, because
             * gt.* (especially gt.em via scattered indices) is shared */
            for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++) {
                for (int64_t batch_k = 0; batch_k < model->em_dim * model->category_num; batch_k++)
                    gt.w[batch_k] += grads_w[batch_j * model->em_dim * model->category_num + batch_k] / (floatx)batch_size;
                /* bi */
                for (int64_t batch_k = 0; batch_k < model->em_dim * model->category_num; batch_k++)
                    gt.w_bi[batch_k] += grads_w_bi[batch_j * model->em_dim * model->category_num + batch_k] / (floatx)batch_size;
                for (int64_t batch_k = 0; batch_k < model->category_num; batch_k++)
                    gt.b[batch_k] += grads_b[batch_j * model->category_num + batch_k] / (floatx)batch_size;
                /* embedding gradients are sparse: scatter through the
                 * argmax indices recorded by forward() */
                for (int64_t batch_k = 0; batch_k < model->em_dim; batch_k++) {
                    int64_t em_index = max_fea_indexs[batch_j * model->em_dim + batch_k];
                    gt.em[em_index] += grads_em[batch_j * model->em_dim + batch_k] / (floatx)batch_size;
                    /* bi */
                    int64_t em_index0 = max_bi_fea_indexs[2 * batch_j * model->em_dim + 2 * batch_k];
                    int64_t em_index1 = max_bi_fea_indexs[2 * batch_j * model->em_dim + 2 * batch_k + 1];
                    gt.em_bi[em_index0] += grads_em_bi[batch_j * model->em_dim + batch_k] / (floatx)batch_size;
                    gt.em_bi[em_index1] += grads_em_bi[batch_j * model->em_dim + batch_k] / (floatx)batch_size;
                }
            }
            /* Adam moment update + parameter step for w / w_bi — parallel,
             * each batch_k index is independent */
            #pragma omp parallel for schedule(static) num_threads(threads_n)
            for (int64_t batch_k = 0; batch_k < model->em_dim * model->category_num; batch_k++) {
                adam_m.w[batch_k] = beta1 * adam_m.w[batch_k] + (1 - beta1) * gt.w[batch_k];
                adam_v.w[batch_k] = beta2 * adam_v.w[batch_k] + (1 - beta2) * gt.w[batch_k] * gt.w[batch_k];
                gt.w[batch_k] = 0.;
                floatx m_hat = adam_m.w[batch_k] / (1 - beta1t);
                floatx v_hat = adam_v.w[batch_k] / (1 - beta2t);
                model->w[batch_k] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
                /* bi */
                adam_m.w_bi[batch_k] = beta1 * adam_m.w_bi[batch_k] + (1 - beta1) * gt.w_bi[batch_k];
                adam_v.w_bi[batch_k] = beta2 * adam_v.w_bi[batch_k] + (1 - beta2) * gt.w_bi[batch_k] * gt.w_bi[batch_k];
                gt.w_bi[batch_k] = 0.;
                m_hat = adam_m.w_bi[batch_k] / (1 - beta1t);
                v_hat = adam_v.w_bi[batch_k] / (1 - beta2t);
                model->w_bi[batch_k] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
            }
            /* bias: small loop, serial is fine */
            for (int64_t batch_k = 0; batch_k < model->category_num; batch_k++) {
                adam_m.b[batch_k] = beta1 * adam_m.b[batch_k] + (1 - beta1) * gt.b[batch_k];
                adam_v.b[batch_k] = beta2 * adam_v.b[batch_k] + (1 - beta2) * gt.b[batch_k] * gt.b[batch_k];
                gt.b[batch_k] = 0.;
                floatx m_hat = adam_m.b[batch_k] / (1 - beta1t);
                floatx v_hat = adam_v.b[batch_k] / (1 - beta2t);
                model->b[batch_k] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
            }
            /* sparse embedding updates — serial: adam_m/adam_v/model->em/gt.em
             * entries can be shared across batch_j; the "!= 0." guard makes
             * each touched index update exactly once */
            for (int64_t batch_j = 0; batch_j < real_batch_size; batch_j++) {
                for (int64_t batch_k = 0; batch_k < model->em_dim; batch_k++) {
                    int64_t em_index = max_fea_indexs[batch_j * model->em_dim + batch_k];
                    if (gt.em[em_index] != 0.) {
                        adam_m.em[em_index] = beta1 * adam_m.em[em_index] + (1 - beta1) * gt.em[em_index];
                        adam_v.em[em_index] = beta2 * adam_v.em[em_index] + (1 - beta2) * gt.em[em_index] * gt.em[em_index];
                        gt.em[em_index] = 0.;
                        floatx m_hat = adam_m.em[em_index] / (1 - beta1t);
                        floatx v_hat = adam_v.em[em_index] / (1 - beta2t);
                        model->em[em_index] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
                    }
                    /* bi */
                    int64_t em_index0 = max_bi_fea_indexs[2 * batch_j * model->em_dim + 2 * batch_k];
                    int64_t em_index1 = max_bi_fea_indexs[2 * batch_j * model->em_dim + 2 * batch_k + 1];
                    if (gt.em_bi[em_index0] != 0.) {
                        adam_m.em_bi[em_index0] = beta1 * adam_m.em_bi[em_index0] + (1 - beta1) * gt.em_bi[em_index0];
                        adam_v.em_bi[em_index0] = beta2 * adam_v.em_bi[em_index0] + (1 - beta2) * gt.em_bi[em_index0] * gt.em_bi[em_index0];
                        gt.em_bi[em_index0] = 0.;
                        floatx m_hat = adam_m.em_bi[em_index0] / (1 - beta1t);
                        floatx v_hat = adam_v.em_bi[em_index0] / (1 - beta2t);
                        model->em_bi[em_index0] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
                    }
                    if (gt.em_bi[em_index1] != 0.) {
                        adam_m.em_bi[em_index1] = beta1 * adam_m.em_bi[em_index1] + (1 - beta1) * gt.em_bi[em_index1];
                        adam_v.em_bi[em_index1] = beta2 * adam_v.em_bi[em_index1] + (1 - beta2) * gt.em_bi[em_index1] * gt.em_bi[em_index1];
                        gt.em_bi[em_index1] = 0.;
                        floatx m_hat = adam_m.em_bi[em_index1] / (1 - beta1t);
                        floatx v_hat = adam_v.em_bi[em_index1] / (1 - beta2t);
                        model->em_bi[em_index1] -= alpha * m_hat / ((floatx)sqrt((floatx)v_hat) + epsilon);
                    }
                }
            }
            /* FIX: Adam bias correction needs beta^t, i.e. multiply by the
             * base each step.  The original `beta1t *= beta1t` squared the
             * term every batch, collapsing it to ~0 almost immediately. */
            beta1t *= beta1;
            beta2t *= beta2;
        } /* end batch */
        epoch_end = time(NULL);
        /* epoch_end = clock(); */
        s_loss /= train_data->text_num;
        printf(" loss: %.4f\n", s_loss);
        printf(" time: %lds\n", epoch_end - epoch_start);
        /* printf(" time: %.1fs\n", (double)(epoch_end - epoch_start)/CLOCKS_PER_SEC ); */
        if (vali_data != NULL) {
            printf("evaluate vali data...\n");
            evaluate(model, vali_data, batch_size, threads_n);
        }
        printf("\n");
    } /* end epoch */
    free(shuffle_index);
    free_model(&adam_m);
    free_model(&adam_v);
    free_model(&gt);
    free(grads_em);
    free(grads_em_bi);
    free(grads_w);
    free(grads_w_bi);
    free(grads_b);
    free(max_feas);
    free(max_fea_indexs);
    free(max_bi_feas);
    free(max_bi_fea_indexs);
    free(softmax_feas);
    free(losses);
}

/* Debug helper: print n int64 values. */
void show(int64_t *a, int64_t n)
{
    for (int64_t i = 0; i < n; i++)
        printf("%ld ", a[i]);
    printf("\n");
}

/* Return the argv position of flag `str`, or -1 when absent. */
int arg_helper(char *str, int argc, char **argv)
{
    int pos;
    for (pos = 1; pos < argc; pos++)
        if (strcmp(str, argv[pos]) == 0)
            return pos;
    return -1;
}

/* Dump the first n uni-gram embedding rows as whitespace-separated text. */
void save_em(struct model_t *model, char *path, int64_t n)
{
    FILE *fp = NULL;
    fp = fopen(path, "w");
    if (fp == NULL) {
        perror("error");
        exit(EXIT_FAILURE);
    }
    for (int64_t i = 0; i < n; i++) {
        int64_t pos = i * model->em_dim;
        for (int64_t j = 0; j < model->em_dim; j++) {
            if (j == model->em_dim - 1) {
                fprintf(fp, "%.8f\n", model->em[pos + j]);
            } else {
                fprintf(fp, "%.8f ", model->em[pos + j]);
            }
        }
    }
    fclose(fp);
}

int main(int argc, char **argv)
{
    struct model_t model;
    struct dataset_t train_data, vali_data, test_data;
    int64_t em_dim = 200, vocab_num = 0, category_num = 0, em_len = 0;
    int64_t epochs = 10, batch_size = 2000, threads_n = 20;
    /* NOTE: -lr is parsed but currently unused — train_adam uses its own
     * fixed Adam step size (alpha). */
    floatx lr = 0.5, limit_vocab = 1.;
    char *train_data_path = NULL, *vali_data_path = NULL, *test_data_path = NULL, *em_path = NULL;
    int i;
    if ((i = arg_helper("-dim", argc, argv)) > 0)
        em_dim = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-vocab", argc, argv)) > 0)
        vocab_num = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-category", argc, argv)) > 0)
        category_num = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-epoch", argc, argv)) > 0)
        epochs = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-batch-size", argc, argv)) > 0)
        batch_size = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-thread", argc, argv)) > 0)
        threads_n = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-lr", argc, argv)) > 0)
        lr = (floatx)atof(argv[i + 1]);
    (void)lr;
    if ((i = arg_helper("-train", argc, argv)) > 0)
        train_data_path = argv[i + 1];
    if ((i = arg_helper("-vali", argc, argv)) > 0)
        vali_data_path = argv[i + 1];
    if ((i = arg_helper("-test", argc, argv)) > 0)
        test_data_path = argv[i + 1];
    if ((i = arg_helper("-em-path", argc, argv)) > 0)
        em_path = argv[i + 1];
    if ((i = arg_helper("-em-len", argc, argv)) > 0)
        em_len = (int64_t)atoi(argv[i + 1]);
    if ((i = arg_helper("-limit-vocab", argc, argv)) > 0)
        limit_vocab = (floatx)atof(argv[i + 1]);
    if (vocab_num == 0) {
        printf("error: miss -vocab");
        exit(-1);
    }
    if (category_num == 0) {
        printf("error: miss -category");
        exit(-1);
    }
    if (train_data_path == NULL) {
        printf("error: need train data!");
        exit(-1);
    }
    init_model(&model, em_dim, vocab_num, category_num, 1);
    if (train_data_path != NULL)
        load_data(&train_data, train_data_path, (int64_t)(limit_vocab * vocab_num));
    if (test_data_path != NULL)
        load_data(&test_data, test_data_path, (int64_t)(limit_vocab * vocab_num));
    if (vali_data_path != NULL)
        load_data(&vali_data, vali_data_path, (int64_t)(limit_vocab * vocab_num));
    if (vali_data_path != NULL)
        train_adam(&model, &train_data, &vali_data, epochs, batch_size, threads_n);
    else
        train_adam(&model, &train_data, NULL, epochs, batch_size, threads_n);
    if (test_data_path != NULL) {
        printf("evaluate test data...\n");
        evaluate(&model, &test_data, batch_size, threads_n);
    }
    if (em_path != NULL) {
        printf("saving em...\n");
        if (em_len == 0)
            em_len = model.vocab_num;
        save_em(&model, em_path, em_len);
    }
    free_model(&model);
    if (train_data_path != NULL)
        free_data(&train_data);
    if (test_data_path != NULL)
        free_data(&test_data);
    if (vali_data_path != NULL)
        free_data(&vali_data);
    return 0;
}
par_csr_matop.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"

/* The following function was formerly part of hypre_ParMatmul
   but was removed so it can also be used for multiplication of
   Boolean matrices.

   Symbolic ("first") pass of the parallel product C = A*B: counts the
   nonzeros per row of C's diag and offd parts and fills the row-pointer
   arrays C_diag_i / C_offd_i (allocated here, in `memory_location`).
   The numeric values are filled in by the caller's second pass.
   B_marker is a per-thread scratch array of size
   num_cols_diag_B + num_cols_offd_C used to detect already-counted
   columns of the current C row. */
void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location,
                               HYPRE_Int ** C_diag_i,
                               HYPRE_Int ** C_offd_i,
                               /*HYPRE_Int ** B_marker,*/
                               HYPRE_Int * A_diag_i,
                               HYPRE_Int * A_diag_j,
                               HYPRE_Int * A_offd_i,
                               HYPRE_Int * A_offd_j,
                               HYPRE_Int * B_diag_i,
                               HYPRE_Int * B_diag_j,
                               HYPRE_Int * B_offd_i,
                               HYPRE_Int * B_offd_j,
                               HYPRE_Int * B_ext_diag_i,
                               HYPRE_Int * B_ext_diag_j,
                               HYPRE_Int * B_ext_offd_i,
                               HYPRE_Int * B_ext_offd_j,
                               HYPRE_Int * map_B_to_C,
                               HYPRE_Int *C_diag_size,
                               HYPRE_Int *C_offd_size,
                               HYPRE_Int num_rows_diag_A,
                               HYPRE_Int num_cols_offd_A,
                               HYPRE_Int allsquare,
                               HYPRE_Int num_cols_diag_B,
                               HYPRE_Int num_cols_offd_B,
                               HYPRE_Int num_cols_offd_C )
{
   HYPRE_Int i1, i2, i3, jj2, jj3;
   HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
   HYPRE_Int num_threads = hypre_NumThreads();
   HYPRE_Int *jj_count_diag_array; /* per-thread nonzero counts for the scan */
   HYPRE_Int *jj_count_offd_array;
   HYPRE_Int ii, size, rest;

   /* First pass begins here.  Computes sizes of C rows.
      Arrays computed: C_diag_i, C_offd_i, B_marker
      Arrays needed: (11, all HYPRE_Int*)
        A_diag_i, A_diag_j, A_offd_i, A_offd_j,
        B_diag_i, B_diag_j, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j, col_map_offd_B,
        col_map_offd_B, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j,
      Scalars computed: C_diag_size, C_offd_size
      Scalars needed:
      num_rows_diag_A, num_rows_diag_A, num_cols_offd_A, allsquare,
      first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B */

   *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
   *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);

   jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    *  Loop over rows of A
    *-----------------------------------------------------------------------*/
   /* Block-partition the rows of A over threads: the first `rest`
      threads get (size+1) rows each, the remainder get `size`. */
   size = num_rows_diag_A/num_threads;
   rest = num_rows_diag_A - size*num_threads;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3)
#endif
   /*for (ii=0; ii < num_threads; ii++)*/
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne; /* [ns, ne): rows owned by this thread */
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = start_indexing;
      jj_count_offd = start_indexing;

      /* marker < jj_row_begin_* means "column not yet seen in this row" */
      if (num_cols_diag_B || num_cols_offd_C)
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
         B_marker[i1] = -1;

      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          *  Set marker for diagonal entry, C_{i1,i1} (for square matrices).
          *--------------------------------------------------------------------*/
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if ( allsquare )
         {
            B_marker[i1] = jj_count_diag;
            jj_count_diag++;
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/
         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];

               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/
               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  /* offd columns are marked at offset num_cols_diag_B so
                     one marker array serves both diag and offd parts */
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     jj_count_diag++;
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/
            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                *  Check B_marker to see that C_{i1,i3} has not already
                *  been accounted for. If it has not, mark it and increment
                *  counter.
                *--------------------------------------------------------*/
               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  jj_count_diag++;
               }
            }

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_offd.
             *-----------------------------------------------------------*/
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  /* map_B_to_C translates B's offd column numbering
                     into C's offd column numbering */
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, mark it and increment
                   *  counter.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
            }
         }

         /*--------------------------------------------------------------------
          * Set C_diag_i and C_offd_i for this row.
          *--------------------------------------------------------------------*/
         (*C_diag_i)[i1] = jj_row_begin_diag;
         (*C_offd_i)[i1] = jj_row_begin_offd;
      }
      jj_count_diag_array[ii] = jj_count_diag;
      jj_count_offd_array[ii] = jj_count_offd;

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);

      /* Manual exclusive prefix scan over the per-thread counts: each
         thread shifts its local row offsets by the total produced by
         lower-numbered threads; thread 0 stores the global totals. */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         jj_count_diag = jj_count_diag_array[0];
         jj_count_offd = jj_count_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj_count_diag += jj_count_diag_array[i1];
            jj_count_offd += jj_count_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            (*C_diag_i)[i1] += jj_count_diag;
            (*C_offd_i)[i1] += jj_count_offd;
         }
      }
      else
      {
         (*C_diag_i)[num_rows_diag_A] = 0;
         (*C_offd_i)[num_rows_diag_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
            (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
         }
      }
   } /* end parallel loop */

   /*-----------------------------------------------------------------------
    *  Allocate C_diag_data and C_diag_j arrays.
    *  Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/

   *C_diag_size = (*C_diag_i)[num_rows_diag_A];
   *C_offd_size = (*C_offd_i)[num_rows_diag_A];

   hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);

   /* End of First Pass */
}

/*--------------------------------------------------------------------------
 * hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *hypre_ParMatmul( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif

   /* Local views of A's diag/offd CSR parts. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   /* Local views of B's diag/offd CSR parts. */
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);

   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j =
hypre_CSRMatrixJ(B_offd);

   HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt last_col_diag_B;
   HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   hypre_ParCSRMatrix *C;
   HYPRE_BigInt *col_map_offd_C;
   HYPRE_Int *map_B_to_C=NULL;

   hypre_CSRMatrix *C_diag;
   HYPRE_Complex *C_diag_data;
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j;

   hypre_CSRMatrix *C_offd;
   HYPRE_Complex *C_offd_data=NULL;
   HYPRE_Int *C_offd_i=NULL;
   HYPRE_Int *C_offd_j=NULL;

   HYPRE_Int C_diag_size;
   HYPRE_Int C_offd_size;
   HYPRE_Int num_cols_offd_C = 0;

   /* Bs_ext: rows of B owned by neighbor processes that this process
      needs for the product; split below into diag/offd parts. */
   hypre_CSRMatrix *Bs_ext;
   HYPRE_Complex *Bs_ext_data;
   HYPRE_Int *Bs_ext_i;
   HYPRE_BigInt *Bs_ext_j;

   HYPRE_Complex *B_ext_diag_data;
   HYPRE_Int *B_ext_diag_i;
   HYPRE_Int *B_ext_diag_j;
   HYPRE_Int B_ext_diag_size;

   HYPRE_Complex *B_ext_offd_data;
   HYPRE_Int *B_ext_offd_i;
   HYPRE_Int *B_ext_offd_j;
   HYPRE_BigInt *B_big_offd_j = NULL; /* global column ids before renumbering */
   HYPRE_Int B_ext_offd_size;

   HYPRE_BigInt n_rows_A, n_cols_A;
   HYPRE_BigInt n_rows_B, n_cols_B;
   HYPRE_Int allsquare = 0;
   HYPRE_Int num_procs;
   HYPRE_Int *my_diag_array;
   HYPRE_Int *my_offd_array;
   HYPRE_Int max_num_threads;

   HYPRE_Complex zero = 0.0;

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);

   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

   if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
      return NULL;
   }

   /* if globally C=A*B is square and locally C_diag should also be square */
   if ( num_rows_diag_A == num_cols_diag_B && n_rows_A == n_cols_B )
   {
      allsquare = 1;
   }

   /*-----------------------------------------------------------------------
    *  Extract B_ext, i.e. portion of B that is stored on neighbor procs
    *  and needed locally for matrix matrix product
    *-----------------------------------------------------------------------*/

   hypre_MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif

   if (num_procs > 1)
   {
      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings within
       * hypre_ParCSRMatrixExtractBExt
       *--------------------------------------------------------------------*/
      Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1);
      Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
      Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
      Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
   }
   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   B_ext_diag_size = 0;
   B_ext_offd_size = 0;
   /* columns in [first_col_diag_B, last_col_diag_B] belong to the local
      diag block; everything else goes to the offd part */
   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B -1;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Concurrent variant: collect the global offd column ids in a
      lock-free hash set, then build the sorted col_map_offd_C and an
      inverse map for renumbering. */
   hypre_UnorderedBigIntSet set;

#pragma omp parallel
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      size = num_cols_offd_A/num_threads;
      rest = num_cols_offd_A - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* Count this thread's diag/offd entries of B_ext. */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
               my_offd_size++;
            else
               my_diag_size++;
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#pragma omp barrier

      /* Prefix-scan the per-thread counts; thread 0 allocates. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
         B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads());
      }

#pragma omp barrier

      /* Split B_ext into diag/offd; record offd global ids in the set. */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]);
               B_big_offd_j[cnt_offd] = Bs_ext_j[j];
               //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
      }

      /* B's own offd columns are also columns of C's offd part. */
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
      for (i = i_begin; i < i_end; i++)
      {
         hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]);
      }
   } /* omp parallel */

   col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C);
   hypre_UnorderedBigIntSetDestroy(&set);
   hypre_UnorderedBigIntMap col_map_offd_C_inverse;
   hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse);

   /* Renumber B_ext_offd columns from global ids to C-offd-local ids. */
   HYPRE_Int i, j;
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_offd_A; i++)
      for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
         B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);

   if (num_cols_offd_C)
   {
      hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse);
   }
   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      HYPRE_Int i;
      /* map_B_to_C[k] = position of col_map_offd_B[k] in col_map_offd_C;
         both maps are sorted, so a merge-style scan suffices. */
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

#pragma omp parallel private(i)
      {
         HYPRE_Int i_begin, i_end;
         hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);

         HYPRE_Int cnt;
         if (i_end > i_begin)
         {
            cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B;
         }

         for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
         {
            if (col_map_offd_C[i] == col_map_offd_B[cnt])
            {
               map_B_to_C[cnt++] = i;
            }
         }
      }
   }
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Bs_ext);
      Bs_ext = NULL;
   }
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */

   /* Serial-friendly variant: gather offd column ids into `temp`,
      sort + unique on thread 0, then binary-search to renumber. */
   HYPRE_BigInt *temp;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      size = num_cols_offd_A/num_threads;
      rest = num_cols_offd_A - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
               my_offd_size++;
            else
               my_diag_size++;
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
         B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_big_offd_j[cnt_offd] = Bs_ext_j[j];
               //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii == 0)
      {
         HYPRE_Int cnt;
         if (num_procs > 1)
         {
            hypre_CSRMatrixDestroy(Bs_ext);
            Bs_ext = NULL;
         }

         /* Merge B's own offd columns, sort, and deduplicate to obtain
            the global column map of C's offd part. */
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i=0; i < num_cols_offd_B; i++)
               temp[cnt++] = col_map_offd_B[i];

            if (cnt)
            {
               HYPRE_BigInt value;
               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               value = temp[0];
               for (i=1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);

            for (i=0; i < num_cols_offd_C; i++)
               col_map_offd_C[i] = temp[i];

            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Renumber B_ext_offd columns: global id -> index in col_map_offd_C. */
      for (i=ns; i < ne; i++)
         for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j],
            //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j],
                                                    num_cols_offd_C);

   } /* end parallel region */

   hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST);

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      HYPRE_Int i, cnt;
      /* map_B_to_C: position of each B offd column inside C's offd map
         (both sorted, single merge scan). */
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i=0; i < num_cols_offd_C; i++)
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
   }
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif

   /* First (symbolic) pass: compute C's row pointers and nnz counts. */
   hypre_ParMatmul_RowSizes( /*&C_diag_i, &C_offd_i, &B_marker,*/
                             memory_location_C,
                             &C_diag_i, &C_offd_i,
                             A_diag_i, A_diag_j, A_offd_i, A_offd_j,
                             B_diag_i, B_diag_j, B_offd_i, B_offd_j,
                             B_ext_diag_i, B_ext_diag_j,
                             B_ext_offd_i, B_ext_offd_j, map_B_to_C,
                             &C_diag_size, &C_offd_size,
                             num_rows_diag_A, num_cols_offd_A, allsquare,
                             num_cols_diag_B, num_cols_offd_B,
                             num_cols_offd_C );

   /*-----------------------------------------------------------------------
    *  Allocate C_diag_data and C_diag_j arrays.
    *  Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/

   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
   C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C);
   C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C);
   if (C_offd_size)
   {
      C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C);
      C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C);
   }

   /*-----------------------------------------------------------------------
    *  Second Pass: Fill in C_diag_data and C_diag_j.
    *  Second Pass: Fill in C_offd_data and C_offd_j.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int *B_marker = NULL; /* maps C column -> slot in this row, same
                                     marker scheme as the symbolic pass */
      HYPRE_Int ns, ne, size, rest, ii;
      HYPRE_Int i1, i2, i3, jj2, jj3;
      HYPRE_Int jj_row_begin_diag, jj_count_diag;
      HYPRE_Int jj_row_begin_offd, jj_count_offd;
      HYPRE_Int num_threads;
      HYPRE_Complex a_entry; /*, a_b_product;*/

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      size = num_rows_diag_A/num_threads;
      rest = num_rows_diag_A - size*num_threads;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* Row pointers were finalized by the first pass, so each thread can
         write its rows' entries independently starting at C_*_i[ns]. */
      jj_count_diag = C_diag_i[ns];
      jj_count_offd = C_offd_i[ns];
      if (num_cols_diag_B || num_cols_offd_C)
      {
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST);
      }
      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
      {
         B_marker[i1] = -1;
      }

      /*-----------------------------------------------------------------------
       *  Loop over interior c-points.
       *-----------------------------------------------------------------------*/
      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          *  Create diagonal entry, C_{i1,i1}
          *--------------------------------------------------------------------*/
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if ( allsquare )
         {
            B_marker[i1] = jj_count_diag;
            C_diag_data[jj_count_diag] = zero;
            C_diag_j[jj_count_diag] = i1;
            jj_count_diag++;
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/
         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];
               a_entry = A_offd_data[jj2];

               /*-----------------------------------------------------------
                *  Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/
               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, create a new entry.
                   *  If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                     C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3];
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3];
                     C_diag_j[jj_count_diag] = i3;
                     jj_count_diag++;
                  }
                  else
                     C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3];
               }
            }
         }

         /*-----------------------------------------------------------------
          *  Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            a_entry = A_diag_data[jj2];

            /*-----------------------------------------------------------
             *  Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/
            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                *  Check B_marker to see that C_{i1,i3} has not already
                *  been accounted for. If it has not, create a new entry.
                *  If it has, add new contribution.
                *--------------------------------------------------------*/
               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3];
                  C_diag_j[jj_count_diag] = i3;
                  jj_count_diag++;
               }
               else
               {
                  C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3];
               }
            }
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   *  Check B_marker to see that C_{i1,i3} has not already
                   *  been accounted for. If it has not, create a new entry.
                   *  If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3-num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3];
                  }
               }
            }
         }
      }
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size);

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
   hypre_ParCSRMatrixSetColStartsOwner(C, 0);

   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_data;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;

   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_ParCSRMatrixOffd(C) = C_offd;

   if (num_cols_offd_C)
   {
      hypre_CSRMatrixData(C_offd) = C_offd_data;
      hypre_CSRMatrixJ(C_offd) = C_offd_j;
      hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
   }

   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

   /*-----------------------------------------------------------------------
    *  Free various arrays
    *-----------------------------------------------------------------------*/
   hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
   if (B_ext_diag_size)
   {
      hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
   if (B_ext_offd_size)
   {
      hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B) hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif

   return C;
}

/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt
   but the code was removed so it can be used for a corresponding function
   for Boolean matrices.

   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing B, they should be destroyed (including
   send_data contained in the comm_handle). */
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
   HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
   // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix
   // other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL; hypre_ParCSRCommPkg *tmp_comm_pkg; HYPRE_Int *B_int_i; HYPRE_BigInt *B_int_j; HYPRE_Int *B_ext_i; HYPRE_BigInt * B_ext_j; HYPRE_Complex * B_ext_data; HYPRE_Complex * B_int_data; HYPRE_BigInt * B_int_row_map; HYPRE_BigInt * B_ext_row_map; HYPRE_Int num_procs, my_id; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i, j, k; HYPRE_Int start_index; /*HYPRE_Int jrow;*/ HYPRE_Int num_rows_B_ext; HYPRE_Int *prefix_sum_workspace; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); HYPRE_BigInt first_row_index = row_starts[0]; num_rows_B_ext = recv_vec_starts[num_recvs]; if ( num_rows_B_ext < 0 ) { /* no B_ext, no communication */ *pB_ext_i = NULL; *pB_ext_j = NULL; if ( data ) *pB_ext_data = NULL; if ( find_row_map ) *pB_ext_row_map = NULL; *num_nonzeros = 0; return; }; B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST); B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST); *pB_ext_i = B_ext_i; if ( find_row_map ) { B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST); B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST); *pB_ext_row_map = B_ext_row_map; }; /*-------------------------------------------------------------------------- * generate B_int_i through adding number of row-elements of offd and diag * for corresponding rows. 
 B_int_i[j+1] contains the number of elements of
 * a row j (which is determined through send_map_elmts)
 *--------------------------------------------------------------------------*/

jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
jdata_send_map_starts[0] = B_int_i[0] = 0;

/*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
{
   /* Pass 1 (per thread): count, for each row to be sent, how many entries
      survive the skip_fine/skip_same_sign filters; accumulate per-send totals. */
   /*HYPRE_Int counts[num_sends];*/
   HYPRE_Int *counts;
   counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
   for (i=0; i < num_sends; i++)
   {
      HYPRE_Int j_begin, j_end;
      hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
      j_begin += send_map_starts[i];
      j_end += send_map_starts[i];

      HYPRE_Int count = 0;
      if (skip_fine && skip_same_sign)
      {
         /* keep only coarse columns whose value has the opposite sign of the
            diagonal entry diag_data[diag_i[jrow]] (A-matrix filtering) */
         for (j = j_begin; j < j_end; j++)
         {
            HYPRE_Int jrow = send_map_elmts[j];
            HYPRE_Int len = 0;

            if (diag_data[diag_i[jrow]] >= 0)
            {
               for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
               {
                  if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (offd_data[k] < 0) len++;
               }
            }
            else
            {
               for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
               {
                  if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (offd_data[k] > 0) len++;
               }
            }
            B_int_i[j + 1] = len;
            count += len;
         }
      }
      else if (skip_fine)
      {
         /* keep only coarse columns (CF_marker >= 0), S-matrix filtering */
         for (j = j_begin; j < j_end; j++)
         {
            HYPRE_Int jrow = send_map_elmts[j];
            HYPRE_Int len = 0;
            for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
            {
               if (CF_marker[diag_j[k]] >= 0) len++;
            }
            for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
            {
               if (CF_marker_offd[offd_j[k]] >= 0) len++;
            }
            B_int_i[j + 1] = len;
            count += len;
         }
      }
      else
      {
         /* no filtering: full row length = diag part + offd part */
         for (j = j_begin; j < j_end; j++)
         {
            HYPRE_Int jrow =
send_map_elmts[j];
            HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
            len += offd_i[jrow + 1] - offd_i[jrow];
            B_int_i[j + 1] = len;
            count += len;
         }
      }

      if (find_row_map)
      {
         /* record the global row id of every row being sent */
         for (j = j_begin; j < j_end; j++)
         {
            HYPRE_Int jrow = send_map_elmts[j];
            B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
         }
      }

      counts[i] = count;
   }

   /* thread-cooperative prefix sum of the per-send counts -> send offsets */
   hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
   {
      for (i = 1; i < num_sends; i++)
      {
         jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
      }

      /*--------------------------------------------------------------------------
       * initialize communication
       *--------------------------------------------------------------------------*/
      /* exchange per-row counts (type 11); row-map exchange (type 21) if wanted */
      comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) );
      if ( find_row_map )
      {
         /* scatter/gather B_int row numbers to form array of B_ext row numbers */
         row_map_comm_handle = hypre_ParCSRCommHandleCreate (21,comm_pkg, B_int_row_map, B_ext_row_map );
      }

      /* send buffers for column indices / values; freed by the caller after
         the returned comm handles are destroyed (see header comment) */
      B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

   /* Pass 2 (per thread): fill B_int_j / B_int_data with the same filters
      as pass 1, starting at this thread's offset into the send buffer. */
   for (i=0; i < num_sends; i++)
   {
      HYPRE_Int j_begin, j_end;
      hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
      j_begin += send_map_starts[i];
      j_end += send_map_starts[i];

      HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

      if (data)
      {
         if (skip_same_sign && skip_fine)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               /*HYPRE_Int count_begin = count;*/

               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                        B_int_data[count] = diag_data[k];
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] < 0)
                     {
                        B_int_j[count] = c_global;
                        B_int_data[count] = offd_data[k];
                        count++;
                     }
                  }
               }
               else
               {
                  /* negative diagonal: keep the positive off-diagonal entries */
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                        B_int_data[count] = diag_data[k];
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     HYPRE_Int c = offd_j[k];
                     HYPRE_BigInt c_global = col_map_offd[c];
                     if (offd_data[k] > 0)
                     {
                        B_int_j[count] = c_global;
                        B_int_data[count] = offd_data[k];
                        count++;
                     }
                  }
               }
            }
         }
         else
         {
            /* unfiltered: copy full rows (global column ids + values) */
            for (j = j_begin; j < j_end; ++j)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
               {
                  B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                  B_int_data[count] = diag_data[k];
                  count++;
               }
               for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
               {
                  B_int_j[count] = col_map_offd[offd_j[k]];
                  B_int_data[count] = offd_data[k];
                  count++;
               }
            }
         }
      } // data
      else
      {
         if (skip_fine)
         {
            /* indices only, coarse columns only */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
                     count++;
                  }
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         }
         else
         {
            /* indices only, full rows */
            for (j = j_begin; j < j_end; ++j)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
               {
                  B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                  count++;
               }
               for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
               {
                  B_int_j[count] = col_map_offd[offd_j[k]];
                  count++;
               }
            }
         }
      } // !data
   } /* for each send target */
   hypre_TFree(counts, HYPRE_MEMORY_HOST);
} /* omp parallel.
JSP: this takes most of time in this function */

hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

/* Temporary comm pkg: same procs as comm_pkg, but send/recv offsets measured
   in nonzeros (jdata_*) rather than in rows. */
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

/* wait for the row-size exchange started above before scanning B_ext_i */
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;

/*--------------------------------------------------------------------------
 * after communication exchange B_ext_i[j+1] contains the number of elements
 * of a row j !
 * evaluate B_ext_i and compute *num_nonzeros for B_ext
 *--------------------------------------------------------------------------*/

for (i=0; i < num_recvs; i++)
   for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      B_ext_i[j+1] += B_ext_i[j];

*num_nonzeros = B_ext_i[num_rows_B_ext];

*pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
B_ext_j = *pB_ext_j;
if (data)
{
   *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_data = *pB_ext_data;
};

for (i=0; i < num_recvs; i++)
{
   start_index = B_ext_i[recv_vec_starts[i]];
   *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
   jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
}

hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

/* start the index (21) and value (1) exchanges; they are returned to the
   caller still in flight so computation can be overlapped */
*comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j);
if (data)
{
   *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data, B_ext_data);
}
if (row_map_comm_handle)
{
   hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
   row_map_comm_handle = NULL;
}

/* the starts arrays are consumed at handle-creation time, so they can be
   freed here even though the handles are still outstanding */
hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);
/* end generic part */
/* NOTE: B_int_j / B_int_data are NOT freed here - they are the send buffers
   of the returned comm handles; the caller frees them after destroying the
   handles (see the header comment of this function). */
}

/* Non-overlapping wrapper: runs the Overlap version with no CF filtering,
 * then immediately completes both exchanges and frees the send buffers. */
void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs, HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j,
   HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data, HYPRE_Real * offd_data
)
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &comm_handle_idx, &comm_handle_data,
      NULL, NULL, 0, 0);

   /* complete the index exchange and release its send buffer */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      /* complete the value exchange and release its send buffer */
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as CSRMatrix.
 *--------------------------------------------------------------------------*/

/* Like hypre_ParCSRMatrixExtractBExt, but leaves the column-index and value
 * exchanges in flight in *comm_handle_idx / *comm_handle_data, and supports
 * the CF_marker / skip_fine / skip_same_sign row filtering of the Arrays
 * variant. A's comm pkg determines which rows of B are needed. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
                                       hypre_ParCSRMatrix *A,
                                       HYPRE_Int data,
                                       hypre_ParCSRCommHandle **comm_handle_idx,
                                       hypre_ParCSRCommHandle **comm_handle_data,
                                       HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
                                       HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);

   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_recvs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int num_sends;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);

   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int num_cols_B, num_nonzeros;
   HYPRE_Int num_rows_B_ext;
   hypre_CSRMatrix *B_ext;

   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_BigInt *idummy;   /* row map is not requested (find_row_map = 0) */

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts =
hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
num_rows_B_ext = recv_vec_starts[num_recvs];

/* gather the raw arrays; the j/data exchanges stay in flight */
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
   ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
     &num_nonzeros,
     data, 0, comm, comm_pkg,
     num_cols_B, num_recvs, num_sends,
     first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts,
     diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data,
     comm_handle_idx, comm_handle_data,
     CF_marker, CF_marker_offd,
     skip_fine, skip_same_sign
   );

/* wrap the arrays in a host CSR matrix (global column ids go in BigJ) */
B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros);
hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(B_ext) = B_ext_i;
hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;

return B_ext;
}

/* Extracts the rows of B that other processors own but this processor needs
 * (per A's comm pkg); current implementation uses the GetExternalRows
 * init/wait pair, the legacy Overlap-based path is kept under #if 0. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data )
{
#if 0
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (want_data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
#else
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   hypre_CSRMatrix *B_ext;
   void            *request;

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsInit(B,
                                   hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                   hypre_ParCSRMatrixColMapOffd(A),
                                   hypre_ParCSRMatrixCommPkg(A),
                                   want_data,
                                   &request);

   B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif

   return B_ext;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixTranspose
 *--------------------------------------------------------------------------*/

/* Computes AT = transpose(A) in parallel and returns it in *AT_ptr.
 * data = 0 transposes the pattern only; data != 0 also transposes values.
 * Returns ierr (0). The off-diagonal part is built by transposing A_offd
 * locally and exchanging its rows with the owning processors. */
HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
                                       hypre_ParCSRMatrix **AT_ptr,
                                       HYPRE_Int data )
{
   hypre_ParCSRCommHandle *comm_handle;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
   HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);

   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
   HYPRE_Int i, j, k, index, counter, j_row;
   HYPRE_BigInt value;

   hypre_ParCSRMatrix *AT;
   hypre_CSRMatrix *AT_diag;
   hypre_CSRMatrix *AT_offd;
   hypre_CSRMatrix *AT_tmp;      /* local transpose of A_offd, to be scattered */

   HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
   HYPRE_Int local_num_rows_AT, local_num_cols_AT;

   HYPRE_Int *AT_tmp_i;
   HYPRE_Int *AT_tmp_j;
   HYPRE_BigInt *AT_big_j = NULL;
   HYPRE_Complex *AT_tmp_data;

   HYPRE_Int *AT_buf_i;          /* received per-row counts */
   HYPRE_BigInt *AT_buf_j;
   HYPRE_Complex *AT_buf_data;

   HYPRE_Int *AT_offd_i;
   HYPRE_Int *AT_offd_j;
   HYPRE_Complex *AT_offd_data;
   HYPRE_BigInt *col_map_offd_AT;
   HYPRE_BigInt *row_starts_AT;
   HYPRE_BigInt *col_starts_AT;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *tmp_recv_vec_starts;
   HYPRE_Int *tmp_send_map_starts;
   hypre_ParCSRCommPkg *tmp_comm_pkg;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   num_cols_offd_AT = 0;
   counter = 0;
   AT_offd_j = NULL;
   AT_offd_data = NULL;
   col_map_offd_AT = NULL;

   HYPRE_MemoryLocation memory_location =
hypre_ParCSRMatrixMemoryLocation(A);

/*---------------------------------------------------------------------
 * If there exists no CommPkg for A, a CommPkg is generated using
 * equally load balanced partitionings
 *--------------------------------------------------------------------*/
if (!comm_pkg)
{
   hypre_MatvecCommPkgCreate(A);
   comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}

if (num_procs > 1)
{
   /* transpose the off-diagonal block locally; its rows (indexed by A's
      offd columns) belong to the receiving processors */
   hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);

   AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
   AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
   if (data)
   {
      AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   if (AT_tmp_i[num_cols_offd])
   {
      AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
   }

   /* convert local column indices of AT_tmp to global row indices of A */
   for (i=0; i < AT_tmp_i[num_cols_offd]; i++)
   {
      //AT_tmp_j[i] += first_row_index;
      AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index;
   }

   /* turn AT_tmp_i into per-row lengths for the reverse (12) exchange */
   for (i=0; i < num_cols_offd; i++)
   {
      AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
   }

   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
}

/* diagonal block transposes locally, overlapped with the exchange above */
hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);

AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location);

if (num_procs > 1)
{
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* build nonzero-based send/recv offsets from the exchanged row lengths */
   tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

   tmp_send_map_starts[0] = send_map_starts[0];
   for (i=0; i < num_sends; i++)
   {
      tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
      for (j=send_map_starts[i]; j <
send_map_starts[i+1]; j++)
      {
         tmp_send_map_starts[i+1] += AT_buf_i[j];
         AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
      }
   }
   /* prefix-sum the incoming counts into the AT_offd row pointer */
   for (i=0; i < num_cols; i++)
   {
      AT_offd_i[i+1] += AT_offd_i[i];
   }

   tmp_recv_vec_starts[0] = recv_vec_starts[0];
   for (i=0; i < num_recvs; i++)
   {
      tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
      for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      {
         tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
      }
   }

   /* temporary comm pkg with the nonzero-based offsets */
   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;

   /* reverse-exchange (22) the global column indices of AT_tmp */
   AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;
   hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);

   if (data)
   {
      /* reverse-exchange (2) the values */
      AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixDestroy(AT_tmp);

   if (AT_offd_i[num_cols])
   {
      AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location);
      AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
      if (data)
      {
         AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location);
      }
   }
   else
   {
      AT_offd_j = NULL;
      AT_offd_data = NULL;
   }

   /* scatter the received entries into AT_offd row by row */
   counter = 0;
   for (i=0; i < num_sends; i++)
   {
      for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
      {
         j_row = send_map_elmts[j];
         index = AT_offd_i[j_row];
         for (k=0; k < AT_buf_i[j]; k++)
         {
            if (data)
            {
               AT_offd_data[index] = AT_buf_data[counter];
            }
            AT_big_j[index++] = AT_buf_j[counter++];
         }
         /* AT_offd_i[j_row] temporarily advances past the filled entries;
            the shift loop below restores a proper row pointer */
         AT_offd_i[j_row] = index;
      }
   }
   for (i=num_cols; i > 0; i--)
   {
      AT_offd_i[i] = AT_offd_i[i-1];
   }
   AT_offd_i[0] = 0;

   if (counter)
   {
      /* sort + unique the received global column ids -> col_map_offd_AT */
      hypre_BigQsort0(AT_buf_j,0,counter-1);
      num_cols_offd_AT = 1;
      value = AT_buf_j[0];
      for (i=1; i < counter; i++)
      {
         if (value < AT_buf_j[i])
         {
            AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
            value = AT_buf_j[i];
         }
      }
   }

   if (num_cols_offd_AT)
   {
      col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
   }
   else
   {
      col_map_offd_AT = NULL;
   }

   for (i = 0; i < num_cols_offd_AT; i++)
   {
      col_map_offd_AT[i] = AT_buf_j[i];
   }
   hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
   hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
   if (data)
   {
      hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
   }

   /* map each global column id to its local offd column index */
   for (i=0; i < counter; i++)
   {
      AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT);
   }
   hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
}

AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location;
hypre_CSRMatrixI(AT_offd) = AT_offd_i;
hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
hypre_CSRMatrixData(AT_offd) = AT_offd_data;

/* AT's rows are A's columns and vice versa */
row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
for (i=0; i < 2; i++)
{
   row_starts_AT[i] = col_starts[i];
}

if (row_starts != col_starts)
{
   col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   for (i=0; i < 2; i++)
   {
      col_starts_AT[i] = row_starts[i];
   }
}
else
{
   col_starts_AT = row_starts_AT;
}

first_row_index_AT = row_starts_AT[0];
first_col_diag_AT = col_starts_AT[0];

local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT );
local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT);

AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(AT) =
comm;
hypre_ParCSRMatrixDiag(AT) = AT_diag;
hypre_ParCSRMatrixOffd(AT) = AT_offd;
hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;

hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;

hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;

hypre_ParCSRMatrixOwnsData(AT) = 1;
hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
/* when the two starts arrays are shared, only one may own (and free) it */
if (row_starts_AT == col_starts_AT)
{
   hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
}

hypre_ParCSRMatrixCommPkg(AT) = NULL;
hypre_ParCSRMatrixCommPkgT(AT) = NULL;

hypre_ParCSRMatrixRowindices(AT) = NULL;
hypre_ParCSRMatrixRowvalues(AT) = NULL;
hypre_ParCSRMatrixGetrowactive(AT) = 0;

hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1;

*AT_ptr = AT;

return ierr;
}

/* -----------------------------------------------------------------------------
 * generate a parallel spanning tree (for Maxwell Equation)
 * G_csr is the node to edge connectivity matrix
 * ----------------------------------------------------------------------------- */

/* Returns in *indices an array whose first entry is the tree size, followed
 * by the indices of the edges selected for the spanning tree.
 * G_type == 0 : G_csr is node-to-edge; otherwise it is edge-to-node and is
 * first transposed locally into (G_diag_i, G_diag_j). */
void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
                                        HYPRE_Int **indices,
                                        HYPRE_Int G_type )
{
   HYPRE_BigInt nrows_G, ncols_G;
   HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
   HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
   HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
   HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
   HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
   HYPRE_Int *T_diag_j, *counts, offset;
   MPI_Comm comm;
   hypre_ParCSRCommPkg
*comm_pkg;
hypre_CSRMatrix *G_diag;

/* fetch G matrix (G_type = 0 ==> node to edge) */
if (G_type == 0)
{
   nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
   ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
   G_diag = hypre_ParCSRMatrixDiag(G_csr);
   G_diag_i = hypre_CSRMatrixI(G_diag);
   G_diag_j = hypre_CSRMatrixJ(G_diag);
}
else
{
   /* edge-to-node input: build the node-to-edge transpose locally */
   nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
   ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
   G_diag = hypre_ParCSRMatrixDiag(G_csr);
   T_diag_i = hypre_CSRMatrixI(G_diag);
   T_diag_j = hypre_CSRMatrixJ(G_diag);
   counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_G; i++) counts[i] = 0;
   for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++;
   G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST);
   G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST);
   G_diag_i[0] = 0;
   for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
   for (i = 0; i < ncols_G; i++)
   {
      for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++)
      {
         k = T_diag_j[j];
         offset = G_diag_i[k]++;   /* row pointer advances while filling... */
         G_diag_j[offset] = i;
      }
   }
   /* ...so rebuild it from the counts afterwards */
   G_diag_i[0] = 0;
   for (i = 1; i <= nrows_G; i++)
   {
      G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
   }
   hypre_TFree(counts, HYPRE_MEMORY_HOST);
}

/* form G transpose in special form (2 nodes per edge max) */
GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST);
for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1;
for (i = 0; i < nrows_G; i++)
{
   for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++)
   {
      edge = G_diag_j[j];
      if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i;
      else GT_diag_mat[edge*2+1] = i;
   }
}

/* BFS on the local matrix graph to find tree */
nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0;
for (i = 0; i < ncols_G; i++) edges_marked[i] = 0;
queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
queue_head = 0;
queue_tail = 1;
queue[0] = 0;
nodes_marked[0] = 1;
/* traversal starting from node 0: an edge joins the tree when it reaches
   an unvisited node (note: queue is used LIFO via queue_tail) */
while ((queue_tail-queue_head) > 0)
{
   node = queue[queue_tail-1];
   queue_tail--;
   for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++)
   {
      edge = G_diag_j[i];
      if (edges_marked[edge] == 0)
      {
         /* skip boundary edges that have only one local endpoint */
         if (GT_diag_mat[2*edge+1] != -1)
         {
            node2 = GT_diag_mat[2*edge];
            if (node2 == node) node2 = GT_diag_mat[2*edge+1];
            if (nodes_marked[node2] == 0)
            {
               nodes_marked[node2] = 1;
               edges_marked[edge] = 1;
               queue[queue_tail] = node2;
               queue_tail++;
            }
         }
      }
   }
}
hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
hypre_TFree(queue, HYPRE_MEMORY_HOST);
hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST);

/* fetch the communication information from G_csr */
comm = hypre_ParCSRMatrixComm(G_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
/* NOTE(review): the comm pkg is only created here when nprocs == 1; for
   nprocs > 1 a NULL comm_pkg would be dereferenced below - presumably
   callers guarantee it already exists. Confirm. */
if (nprocs == 1 && comm_pkg == NULL)
{
   hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
   comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
}

/* construct processor graph based on node-edge connection */
/* (local edges connected to neighbor processor nodes)     */
n_children = 0;
nrecvs = nsends = 0;
if (nprocs > 1)
{
   nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   proc_array = NULL;
   if ((nsends+nrecvs) > 0)
   {
      /* merge send and recv neighbors into a sorted, deduplicated list */
      n_proc_array = 0;
      proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST);
      for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i];
      for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i];
      hypre_qsort0(proc_array, 0, nsends+nrecvs-1);
      n_proc_array = 1;
      for (i = 1; i < nrecvs+nsends; i++)
         if (proc_array[i] != proc_array[n_proc_array])
            proc_array[n_proc_array++] = proc_array[i];
   }
   pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
   recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm);
   /* assemble the full processor adjacency graph on every rank */
   pgraph_i[0] = 0;
   for (i = 1; i <= nprocs; i++)
      pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1];
   pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST);
   hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm);
   hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST);

   /* BFS on the processor graph to determine parent and children */
   nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
   for (i = 0; i < nprocs; i++) nodes_marked[i] = -1;
   queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST);
   queue_head = 0;
   queue_tail = 1;
   node = 0;
   queue[0] = node;
   while ((queue_tail-queue_head) > 0)
   {
      proc = queue[queue_tail-1];
      queue_tail--;
      for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++)
      {
         proc2 = pgraph_j[i];
         if (nodes_marked[proc2] < 0)
         {
            nodes_marked[proc2] = proc;   /* record BFS parent */
            queue[queue_tail] = proc2;
            queue_tail++;
         }
      }
   }
   parent = nodes_marked[mypid];
   n_children = 0;
   for (i = 0; i < nprocs; i++)
      if (nodes_marked[i] == mypid) n_children++;
   if (n_children == 0) {n_children = 0; children = NULL;}
   else
   {
      children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST);
      n_children = 0;
      for (i = 0; i < nprocs; i++)
         if (nodes_marked[i] == mypid) children[n_children++] = i;
   }
   hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
   hypre_TFree(queue, HYPRE_MEMORY_HOST);
   hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST);
   hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST);
}

/* first, connection with my parent : if the edge in my parent
 * is incident to one of my nodes, then my parent will mark it */
found = 0;
for (i = 0; i < nrecvs; i++)
{
   proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
   if (proc == parent)
   {
      found = 1;
      break;
   }
}

/* but if all the edges connected to my parent are on my side,
 * then I will just pick one of them as tree edge */
if (found == 0)
{
   for (i = 0; i < nsends; i++)
   {
      proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
      if (proc == parent)
      {
         k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
         edge =
hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
         edges_marked[edge] = 1;
         break;
      }
   }
}

/* next, if my processor has an edge incident on one node in my
 * child, put this edge on the tree. But if there is no such
 * edge, then I will assume my child will pick up an edge      */
for (j = 0; j < n_children; j++)
{
   proc = children[j];
   for (i = 0; i < nsends; i++)
   {
      proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
      if (proc == proc2)
      {
         k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i);
         edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k);
         edges_marked[edge] = 1;
         break;
      }
   }
}
if (n_children > 0)
{
   hypre_TFree(children, HYPRE_MEMORY_HOST);
}

/* count the size of the tree */
tree_size = 0;
for (i = 0; i < ncols_G; i++)
   if (edges_marked[i] == 1) tree_size++;
/* output layout: t_indices[0] = tree size, then the marked edge indices */
t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST);
t_indices[0] = tree_size;
tree_size = 1;
for (i = 0; i < ncols_G; i++)
   if (edges_marked[i] == 1) t_indices[tree_size++] = i;
(*indices) = t_indices;
hypre_TFree(edges_marked, HYPRE_MEMORY_HOST);
if (G_type != 0)
{
   /* free the locally-built transpose */
   hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST);
}
}

/* -----------------------------------------------------------------------------
 * extract submatrices based on given indices
 * ----------------------------------------------------------------------------- */

/* Splits A_csr into 2x2 blocks according to the index set in indices2
 * (indices2[0] = count, then the indices). Sequential only (nprocs == 1). */
void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr,
                                           HYPRE_Int *indices2,
                                           hypre_ParCSRMatrix ***submatrices )
{
   HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
   HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
   HYPRE_BigInt *itmp_array;
   HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag;
   HYPRE_Int nrows, nnz;
   HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts;
   HYPRE_Int *diag_i, *diag_j, row, *offd_i;
   HYPRE_Complex *A_diag_a, *diag_a;
   hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr;
   hypre_CSRMatrix *A_diag, *diag, *offd;
   MPI_Comm comm;

   /*
----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) 
{ if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume 
sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); 
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = 
hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, 
*offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if 
(exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = 
A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = A_diag_j[j]; 
diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A21_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the matrix * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A ) { hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A ); hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A ); return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatAminvDB * computes C = (A - inv(D)B) where D is a diagonal matrix * Note: Data structure of A is expected to be a subset of data structure of B! 
*--------------------------------------------------------------------------*/

/* Computes C = A - inv(D)*B with D = diag(d) (see header comment above).
 * C is created as a structural clone of B, so A's sparsity pattern must be a
 * subset of B's for the in-place accumulation below to stay within bounds.
 * d          : array of num_rows diagonal entries; each is inverted once.
 * C_ptr(out) : receives the newly allocated result matrix.
 * Returns hypre_error_flag. */
HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B,
                                     HYPRE_Complex *d,
                                     hypre_ParCSRMatrix **C_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_ParCSRMatrix *C = NULL;
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_Int num_sends_B, num_recvs_B;
   HYPRE_Int i, j, cnt;
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_offd = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_offd_i = NULL;
   HYPRE_Int *C_offd_j = NULL;
   HYPRE_Complex *C_offd_data = NULL;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *recv_procs_B;
   HYPRE_Int *send_procs_B;
   HYPRE_Int *recv_vec_starts_B;
   HYPRE_Int *send_map_starts_B;
   HYPRE_Int *send_map_elmts_B;
   hypre_ParCSRCommPkg *comm_pkg_C;
   HYPRE_Int *recv_procs_C;
   HYPRE_Int *send_procs_C;
   HYPRE_Int *recv_vec_starts_C;
   HYPRE_Int *send_map_starts_C;
   HYPRE_Int *send_map_elmts_C;
   HYPRE_Int *map_to_B;
   /*HYPRE_Int *C_diag_array;
   HYPRE_Int *C_offd_array;*/
   HYPRE_Complex *D_tmp;
   HYPRE_Int size, rest, num_threads, ii;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for B, a CommPkg is generated
    *--------------------------------------------------------------------*/
   if (!comm_pkg_B)
   {
      hypre_MatvecCommPkgCreate(B);
      comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   }

   /* C starts as a structural copy of B; its arrays are overwritten below */
   C = hypre_ParCSRMatrixClone(B, 0);
   /*hypre_ParCSRMatrixInitialize(C);*/

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   /* static row partition for the threaded loop: the first `rest` threads
      take size+1 rows, the remainder take `size` rows */
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;

   D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);

   /* map A's offd column numbering onto B's (both col maps are traversed in
      ascending order; assumes every A offd column also appears in B's map) */
   if (num_cols_offd_A)
   {
      map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_cols_offd_A; i++)
      {
         while (col_map_offd_B[cnt] < col_map_offd_A[i])
         {
            cnt++;
         }
         map_to_B[i] = cnt;
         cnt++;
      }
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int *A_marker = NULL;
      HYPRE_Int ns, ne, A_col, num_cols, nmax;
      /* [ns, ne) = this thread's row range */
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* one marker array reused for both the diag pass (indexed by column,
         up to num_rows) and the offd pass (up to num_cols_offd_B) */
      nmax = hypre_max(num_rows, num_cols_offd_B);
      A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);

      for (i=0; i < num_rows; i++)
         A_marker[i] = -1;

      for (i=ns; i < ne; i++)
         D_tmp[i] = 1.0/d[i];   /* invert the diagonal once per row */

      /* diag part: write A's entries, then subtract D_tmp[i]*B row-wise;
         A_marker[col] >= C_diag_i[i] means col was already written this row */
      num_cols = C_diag_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            A_col = A_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = A_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] += A_diag_data[j];
            }
         }
         for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
         {
            A_col = B_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
            }
         }
      }

      /* offd part: same scheme, A columns translated through map_to_B */
      for (i=0; i < num_cols_offd_B; i++)
         A_marker[i] = -1;

      num_cols = C_offd_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            A_col = map_to_B[A_offd_j[j]];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = A_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] += A_offd_data[j];
            }
         }
         for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
         {
            A_col = B_offd_j[j];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
            }
         }
      }
      hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
   } /* end parallel region */

   /*for (i=0; i < num_cols_offd_B; i++)
     col_map_offd_C[i] = col_map_offd_B[i]; */

   /* deep-copy B's communication package so C owns its own comm arrays */
   num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
   num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
   recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
   recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
   send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
   send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
   send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);

   recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
   recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST);
   send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
   send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST);
   send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);

   for (i=0; i < num_recvs_B; i++)
      recv_procs_C[i] = recv_procs_B[i];
   for (i=0; i < num_recvs_B+1; i++)
      recv_vec_starts_C[i] = recv_vec_starts_B[i];
   for (i=0; i < num_sends_B; i++)
      send_procs_C[i] = send_procs_B[i];
   for (i=0; i < num_sends_B+1; i++)
      send_map_starts_C[i] = send_map_starts_B[i];
   for (i=0; i < send_map_starts_B[num_sends_B]; i++)
      send_map_elmts_C[i] = send_map_elmts_B[i];

   comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
   hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
   hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;

   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   if (num_cols_offd_A)
      hypre_TFree(map_to_B, HYPRE_MEMORY_HOST);

   *C_ptr = C;

   return (hypre_error_flag);
}

/*--------------------------------------------------------------------------
 * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/ hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; 
HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt n_rows_A, n_cols_A; HYPRE_BigInt n_rows_B, n_cols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. 
* Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; void *request; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request); C_ext = hypre_ExchangeExternalRowsWait(request); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = 
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) temp[C_ext_offd_size++] = C_ext_j[j]; else C_ext_diag_size++; C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { hypre_BigQsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i=0; i < C_ext_num_rows; i++) { for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++) if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { 
C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } for (i=0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (ik = 0; ik < 
num_cols_diag_B; ik++) B_marker[ik] = -1; for (ik = 0; ik < num_cols_offd_C; ik++) B_marker_offd[ik] = -1; nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! 
* Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) B_marker[ik] = -1; for (ik = 0; ik < num_cols_offd_C; ik++) B_marker_offd[ik] = -1; /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii-1]; nnz_o = C_offd_array[ii-1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } break; } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST); } /*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, 
nnz_offd); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ /* row_starts[0] is start of local rows. row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A; hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixRowStarts(C) = col_starts_A; hypre_ParCSRMatrixColStarts(C) = col_starts_B; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); if (C_diag) { hypre_ParCSRMatrixDiag(C) = C_diag; } else { hypre_ParCSRMatrixDiag(C) = C_tmp_diag; } if (C_offd) { hypre_ParCSRMatrixOffd(C) = C_offd; } else { hypre_ParCSRMatrixOffd(C) = C_tmp_offd; } hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) { 
P_marker[i] = -1; } jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i=0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i=0; i < num_cols_offd_C; i++) { if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } } for (i=0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } if (C_diag) { hypre_CSRMatrixDestroy(C_tmp_diag); } if (C_offd) { hypre_CSRMatrixDestroy(C_tmp_offd); } #if defined(HYPRE_USING_CUDA) if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE ) { hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C)); hypre_SyncCudaComputeStream(hypre_handle()); } #endif return C; } HYPRE_Int hypre_ParvecBdiagInvScal( hypre_ParVector *b, HYPRE_Int blockSize, hypre_ParVector **bs, hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(b); HYPRE_Int num_procs, my_id; 
hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   /* body of hypre_ParvecBdiagInvScal (signature above): computes *bs = B^{-1}*b,
    * where B is the block diagonal of A previously inverted and cached in
    * A->bdiaginv by hypre_ParcsrBdiagInvScal */
   HYPRE_Int i, j, s, block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row   = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row    = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row     = last_row + 1; /* one past-the-last */
   /* global row range of the diagonal blocks touched by this proc;
    * block boundaries need not align with the row partitioning */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );

   /* the inverted blocks and the matching comm pkg must have been saved in A */
   hypre_assert(blockSize == A->bdiag_size);
   HYPRE_Complex *bdiaginv = A->bdiaginv;
   hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
   HYPRE_Complex *dense = bdiaginv;

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);

   /* local vector of b */
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle *comm_handle;

   /* the new vector shares b's partitioning (copy of the 2-entry starts array) */
   j = 2;
   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(part, hypre_ParVectorPartitioning(b), j*sizeof(HYPRE_BigInt));
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   /* pack the entries of b requested by other procs */
   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
   /* ... */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* apply the inverted diagonal blocks, one block of size s x s at a time */
   for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* only rows owned by this proc are written */
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         bnew_local_data[local_i] = 0.0;
         /* dot row block_i of the (column-major) dense inverse with b */
         for (j = 0; j < s; j++)
         {
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0)
            {
               continue;
            }
            if (global_rid >= first_row && global_rid < end_row)
            {
               /* entry of b owned locally */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               bnew_local_data[local_i] += val * b_local_data[rid];
            }
            else
            {
               /* external entry: recv_b is packed with the rows below
                * first_row first, then the rows at/above end_row */
               HYPRE_Int rid;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               bnew_local_data[local_i] += val * recv_b[rid];
            }
         }
      }
      dense += blockSize * blockSize; /* advance to the next cached block */
   }

   hypre_TFree(send_b, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_b, HYPRE_MEMORY_HOST);
   *bs = bnew;

   return hypre_error_flag;
}

/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A
 * @param[in]  A         input matrix (must be globally square)
 * @param[in]  blockSize block size of the block diagonal
 * @param[out] As        the scaled matrix B^{-1}*A
 * @return hypre_error_flag
 * @warning caches the inverted diagonal blocks and a comm pkg in
 *          A->bdiaginv / A->bdiaginv_comm_pkg for later use by
 *          hypre_ParvecBdiagInvScal
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A, HYPRE_Int blockSize, hypre_ParCSRMatrix **As)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, k, s;
   HYPRE_BigInt block_start, block_end;
   /* diag
part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   /* local row/column ranges of A on this proc */
   HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A);
   HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */
   HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
   HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local;
   HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   void *request;

   /* if square globally and locally */
   HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col);

   if (nrow_global != ncol_global)
   {
      hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
      return hypre_error_flag;
   }

   /* in block diagonals, row range of the blocks this proc span */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
   HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize);

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);
   //return 0;

   /* number of external rows */
   HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
   HYPRE_BigInt *ext_indices;
   HYPRE_Int A_ext_nnz;

   hypre_CSRMatrix *A_ext = NULL;
   HYPRE_Complex *A_ext_a = NULL;
   HYPRE_Int *A_ext_i = NULL;
   /* NOTE: A_ext_j holds global (big) column indices */
   HYPRE_BigInt *A_ext_j = NULL;

   /* dense storage for all diagonal blocks owned by this proc (column-major,
    * blockSize x blockSize each) */
   HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Real *dense = dense_all;
   HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Complex *dgetri_work = NULL;
   HYPRE_Int dgetri_lwork = -1, lapack_info;
   HYPRE_Int num_cols_A_offd_new;
   HYPRE_BigInt *col_map_offd_A_new;
   HYPRE_BigInt big_i;
   HYPRE_Int *offd2new = NULL;
   HYPRE_Int *marker_diag, *marker_newoffd;

   HYPRE_Int nnz_diag = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd = A_offd_i[nrow_local];
   HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
   HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
   HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
   /* heuristic */
   HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
   HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;
   A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
   A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrix *Anew;
   hypre_CSRMatrix *Anew_diag;
   hypre_CSRMatrix *Anew_offd;
   HYPRE_BigInt *row_starts_new, *col_starts_new;

   HYPRE_Real eps = 2.2e-16;

   /* Start with extracting the external rows: all rows of the blocks this
    * proc spans that live on other procs (below first_row, then at/above
    * end_row — this order fixes the packing of A_ext and recv buffers) */
   HYPRE_BigInt *ext_offd;
   ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
   j = 0;
   for (big_i = first_row_block; big_i < first_row; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   for (big_i = end_row; big_i < end_row_block; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   hypre_assert(j == num_ext_rows);

   /* create CommPkg for external rows */
   hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts, hypre_ParCSRMatrixAssumedPartition(A), num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);

   hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
   A_ext = hypre_ParcsrGetExternalRowsWait(request);

   hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);

   A_ext_i = hypre_CSRMatrixI(A_ext);
   A_ext_j = hypre_CSRMatrixBigJ(A_ext);
   A_ext_a = hypre_CSRMatrixData(A_ext);
   A_ext_nnz = A_ext_i[num_ext_rows];
   ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);

   /* find the offd indices in A_ext */
   for (i = 0, j = 0; i < A_ext_nnz; i++)
   {
      /* global index */
      HYPRE_BigInt cid = A_ext_j[i];
      /* keep the offd indices */
      if (cid < first_col || cid >= end_col)
      {
         ext_offd[j++] = cid;
      }
   }
   /* remove duplicates after sorting (TODO better ways?) */
   hypre_BigQsort0(ext_offd, 0, j-1);
   for (i = 0, k = 0; i < j; i++)
   {
      if (i == 0 || ext_offd[i] != ext_offd[i-1])
      {
         ext_offd[k++] = ext_offd[i];
      }
   }
   /* union these `k' new indices into col_map_offd_A */
   col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
   if (k)
   {
      /* map offd to offd_new */
      offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
   hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);

   /* adjust column indices in A_ext */
   for (i = 0; i < A_ext_nnz; i++)
   {
      HYPRE_BigInt cid = A_ext_j[i];
      if (cid < first_col || cid >= end_col)
      {
         j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
         /* searching must succeed */
         hypre_assert(j >= 0 && j < num_cols_A_offd_new);
         /* trick: save ncol_local + j back */
         A_ext_j[i] = ncol_local + j;
      }
      else
      {
         /* save local index: [0, ncol_local-1] */
         A_ext_j[i] = cid - first_col;
      }
   }

   /* marker for diag */
   marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   /* marker for newoffd */
   marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }

   /* outer most loop for blocks */
   for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);
      /* 1.
fill the dense block diag matrix */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         /* row index i: it can be local or external */
         if (big_i >= first_row && big_i < end_row)
         {
            /* is a local row */
            j = (HYPRE_Int)(big_i - first_row);
            /* copy entries of A(diag) whose global column falls in this block */
            for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
            {
               HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k];
               }
            }
            if (num_cols_A_offd)
            {
               for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
               {
                  HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
                  if (cid >= block_start && cid < block_end)
                  {
                     dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k];
                  }
               }
            }
         }
         else
         {
            /* is an external row: A_ext rows are packed below-first_row
             * first, then at/above end_row */
            if (big_i < first_row)
            {
               j = (HYPRE_Int)(big_i - first_row_block);
            }
            else
            {
               j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
            }
            for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++)
            {
               HYPRE_BigInt cid = A_ext_j[k];
               /* recover the global index */
               cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local];
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k];
               }
            }
         }
      }

      /* 2. invert the dense matrix: LU factorization + inverse */
      hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);
      hypre_assert(lapack_info == 0);
      if (lapack_info == 0)
      {
         HYPRE_Int query = -1;
         HYPRE_Real lwork_opt;
         /* query the optimal size of work */
         hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);
         hypre_assert(lapack_info == 0);
         if (lwork_opt > dgetri_lwork)
         {
            dgetri_lwork = lwork_opt;
            dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
         }
         hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);
         hypre_assert(lapack_info == 0);
      }

      /* filter out *zeros* : drop entries tiny relative to the block's
       * Frobenius norm */
      HYPRE_Real Fnorm = 0.0;
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            HYPRE_Complex t = dense[j+i*blockSize];
            Fnorm += t * t;
         }
      }
      Fnorm = sqrt(Fnorm);
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm )
            {
               dense[j+i*blockSize] = 0.0;
            }
         }
      }

      /* 3. premultiplication: one-pass dynamic allocation */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* starting points of this row in j */
         HYPRE_Int diag_i_start = nnz_diag_new;
         HYPRE_Int offd_i_start = nnz_offd_new;
         /* compute a new row with global index 'i' and local index 'local_i' */
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }
         /* if square^2: reserve the first space in diag part to the diag entry */
         if (square2)
         {
            marker_diag[local_i] = nnz_diag_new;
            if (nnz_diag_new == nnz_diag_alloc)
            {
               nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
               A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
               A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
            }
            A_diag_j_new[nnz_diag_new] = local_i;
            A_diag_a_new[nnz_diag_new] = 0.0;
            nnz_diag_new ++;
         }
         /* combine s rows */
         for (j = 0; j < s; j++)
         {
            /* row to combine: global row id */
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* the multiplier */
            HYPRE_Complex val = dense[block_i + j*blockSize];

            if (val == 0.0)
            {
               continue;
            }

            if (global_rid >= first_row && global_rid < end_row)
            {
               /* this row is local */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               HYPRE_Int ii;

               for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_diag_j[ii];
                  HYPRE_Complex vv = A_diag_a[ii];

                  if (marker_diag[col] < diag_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_diag[col] = nnz_diag_new;
                     if (nnz_diag_new == nnz_diag_alloc)
                     {
                        nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                        A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_diag_j_new[nnz_diag_new] = col;
                     A_diag_a_new[nnz_diag_new] = val * vv;
                     nnz_diag_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_diag[col];
                     hypre_assert(A_diag_j_new[p] == col);
                     A_diag_a_new[p] += val * vv;
                  }
               }

               for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_offd_j[ii];
                  /* use the mapper to map to new offd */
                  HYPRE_Int col_new = offd2new ? offd2new[col] : col;
                  HYPRE_Complex vv = A_offd_a[ii];

                  if (marker_newoffd[col_new] < offd_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_newoffd[col_new] = nnz_offd_new;
                     if (nnz_offd_new == nnz_offd_alloc)
                     {
                        nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                        A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_offd_j_new[nnz_offd_new] = col_new;
                     A_offd_a_new[nnz_offd_new] = val * vv;
                     nnz_offd_new ++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_newoffd[col_new];
                     hypre_assert(A_offd_j_new[p] == col_new);
                     A_offd_a_new[p] += val * vv;
                  }
               }
            }
            else
            {
               /* this is an external row: go to A_ext */
               HYPRE_Int rid, ii;

               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }

               for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++)
               {
                  /* A_ext_j was remapped above: [0, ncol_local) = diag,
                   * >= ncol_local = new-offd index + ncol_local */
                  HYPRE_Int col = (HYPRE_Int)A_ext_j[ii];
                  HYPRE_Complex vv = A_ext_a[ii];

                  if (col < ncol_local)
                  {
                     /* in diag part */
                     if (marker_diag[col] < diag_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_diag[col] = nnz_diag_new;
                        if (nnz_diag_new == nnz_diag_alloc)
                        {
                           nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                           A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                           A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_diag_j_new[nnz_diag_new] = col;
                        A_diag_a_new[nnz_diag_new] = val * vv;
                        nnz_diag_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_diag[col];
                        hypre_assert(A_diag_j_new[p] == col);
                        A_diag_a_new[p] += val * vv;
                     }
                  }
                  else
                  {
                     /* in offd part */
                     col -= ncol_local;
                     if (marker_newoffd[col] < offd_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_newoffd[col] = nnz_offd_new;
                        if (nnz_offd_new == nnz_offd_alloc)
                        {
                           nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                           A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                           A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_offd_j_new[nnz_offd_new] = col;
                        A_offd_a_new[nnz_offd_new] = val * vv;
                        nnz_offd_new ++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_newoffd[col];
                        hypre_assert(A_offd_j_new[p] == col);
                        A_offd_a_new[p] += val * vv;
                     }
                  }
               }
            }
         } /* done for row local_i */
         A_diag_i_new[local_i + 1] = nnz_diag_new;
         A_offd_i_new[local_i + 1] = nnz_offd_new;
      } /* for i, each row */
      dense += blockSize * blockSize;
   } /* for each block */
   /* done with all rows */

   /* resize properly */
   A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);

   /* readjust col_map_offd_new: compress out columns with no remaining entry */
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }
   /* mark the columns actually used */
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = A_offd_j_new[i];
      if (marker_newoffd[j] == -1)
      {
         marker_newoffd[j] = 1;
      }
   }
   /* compact the map; marker_newoffd becomes old-index -> new-index */
   for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
   {
      if (marker_newoffd[i] == 1)
      {
         col_map_offd_A_new[j] = col_map_offd_A_new[i];
         marker_newoffd[i] = j++;
      }
   }
   num_cols_A_offd_new = j;
   /* renumber the offd column indices */
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = marker_newoffd[A_offd_j_new[i]];
      hypre_assert(j >= 0 && j < num_cols_A_offd_new);
      A_offd_j_new[i] = j;
   }

   /* the result shares A's row/col partitionings (2-entry starts arrays) */
   j = 2;
   row_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
   memcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));

   /* Now, we should have everything of Parcsr matrix As */
   Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global,
row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = 
hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs;
   HYPRE_Int my_id;
   void **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   /* fill the send array with row lengths */
   for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
   {
      /* j: row index to send */
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
      num_nnz_send += send_i[i];
   }
   /* send this array out: note the shift in recv_i by one (async) */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* prepare data to send out. overlap with the above communication */
   send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
   }
   /* exclusive-scan of the send row lengths: offsets into send_j/send_a */
   send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
   send_i_offset[0] = 0;
   hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* prefix sum. TODO: OMP parallelization */
   for (i = 1; i <= num_rows_send; i++)
   {
      send_i_offset[i] += send_i_offset[i-1];
   }
   hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i <= num_sends; i++)
   {
      send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* fill the CSR matrix: j and a */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
   for (i = 0; i < num_rows_send; i++)
   {
      HYPRE_Int i1 = send_i_offset[i];
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      /* open row j and fill ja and a to send; column indices are sent as
       * global (big) ids */
      for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
      {
         send_j[i1] = first_col + A_diag_j[k];
         if (want_data)
         {
            send_a[i1] = A_diag_a[k];
         }
         i1++;
      }
      if (num_procs > 1)
      {
         for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
         {
            send_j[i1] = col_map_offd_A[A_offd_j[k]];
            if (want_data)
            {
               send_a[i1] = A_offd_a[k];
            }
            i1++;
         }
      }
      hypre_assert(send_i_offset[i+1] == i1);
   }

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs */
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
   }
   recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data
    * (same procs as comm_pkg, but nnz-based map starts) */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   /* create A_ext: holds the received rows; column indices are global (BigJ) */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI (A_ext) = recv_i;
   hypre_CSRMatrixBigJ(A_ext) = recv_j;
   hypre_CSRMatrixData(A_ext) = recv_a;

   /* output: opaque request consumed by hypre_ParcsrGetExternalRowsWait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;
   vrequest[3] = (void *) comm_pkg_j;
   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Complete the communication started by hypre_ParcsrGetExternalRowsInit,
 * release the send buffers and the temporary comm pkg, and return the
 * received external rows as a CSRMatrix (global column indices in BigJ). */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2];
hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j); if (comm_handle_a) { HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_a, HYPRE_MEMORY_HOST); } hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } /* C = alpha * A + beta * B * A and B are assumed to have the same row and column partitionings */ HYPRE_Int hypre_ParcsrAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **Cout ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); 
   HYPRE_Int nnz_diag_A = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd_A = A_offd_i[nrow_local];
   /* diag part of B */
   hypre_CSRMatrix *B_diag   = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex   *B_diag_a = hypre_CSRMatrixData(B_diag);
   HYPRE_Int       *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int       *B_diag_j = hypre_CSRMatrixJ(B_diag);
   /* off-diag part of B */
   hypre_CSRMatrix *B_offd   = hypre_ParCSRMatrixOffd(B);
   HYPRE_Complex   *B_offd_a = hypre_CSRMatrixData(B_offd);
   HYPRE_Int       *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int       *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Int        num_cols_B_offd = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_BigInt    *col_map_offd_B  = hypre_ParCSRMatrixColMapOffd(B);
   /* B2C_offd: map from B's offd columns to C's offd columns (set by hypre_union2) */
   HYPRE_Int       *B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_B_offd, HYPRE_MEMORY_HOST);
   /* A and B must share the same global sizes and local partitioning */
   hypre_assert(nrow_global == hypre_ParCSRMatrixGlobalNumRows(B));
   hypre_assert(ncol_global == hypre_ParCSRMatrixGlobalNumCols(B));
   hypre_assert(nrow_local == hypre_CSRMatrixNumRows(B_diag));
   hypre_assert(ncol_local == hypre_CSRMatrixNumCols(B_diag));
   HYPRE_Int nnz_diag_B = B_diag_i[nrow_local];
   HYPRE_Int nnz_offd_B = B_offd_i[nrow_local];
   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */
   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
   /* C */
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt       *row_starts_C, *col_starts_C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   /* upper bound on C's offd width; tightened by hypre_union2 below */
   HYPRE_Int     num_cols_C_offd = num_cols_A_offd + num_cols_B_offd;
   HYPRE_BigInt *col_map_offd_C  = hypre_TAlloc(HYPRE_BigInt, num_cols_C_offd, HYPRE_MEMORY_HOST);
   /* allocate for the worst case: no overlap between A's and B's patterns */
   HYPRE_Int nnz_diag_C_alloc = nnz_diag_A + nnz_diag_B;
   HYPRE_Int nnz_offd_C_alloc = nnz_offd_A + nnz_offd_B;
   HYPRE_Int nnz_diag_C = 0, nnz_offd_C = 0;
   HYPRE_Int     *C_diag_i = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1,   memory_location_C);
   HYPRE_Int     *C_diag_j = hypre_CTAlloc(HYPRE_Int,     nnz_diag_C_alloc, memory_location_C);
   HYPRE_Complex *C_diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag_C_alloc, memory_location_C);
   HYPRE_Int     *C_offd_i = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1,   memory_location_C);
   HYPRE_Int     *C_offd_j = hypre_CTAlloc(HYPRE_Int,     nnz_offd_C_alloc, memory_location_C);
   HYPRE_Complex *C_offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd_C_alloc, memory_location_C);
   /* merge the two offd column maps and build the A->C / B->C column mappings */
   hypre_union2( num_cols_A_offd, col_map_offd_A,
                 num_cols_B_offd, col_map_offd_B,
                 &num_cols_C_offd, col_map_offd_C,
                 A2C_offd, B2C_offd );
   /* markers record, per column, the position of its entry in the current C row;
    * a value below the row's start offset means "not seen in this row yet" */
   HYPRE_Int *marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   HYPRE_Int *marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_C_offd, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   for (i = 0; i < num_cols_C_offd; i++)
   {
      marker_offd[i] = -1;
   }
   /* main loop for each row i */
   for (i = 0; i < nrow_local; i++)
   {
      HYPRE_Int diag_i_start = nnz_diag_C;
      HYPRE_Int offd_i_start = nnz_offd_C;
      /* scatter alpha*A's row first; A is assumed to have no duplicate
       * column entries within a row */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int     col = A_diag_j[j];
         HYPRE_Complex val = A_diag_a[j];
         if (marker_diag[col] < diag_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = alpha * val;
            nnz_diag_C ++;
         }
         else
         {
            /* this should not happen */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }
      /* then add beta*B's row, merging into existing entries */
      for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
      {
         HYPRE_Int     col = B_diag_j[j];
         HYPRE_Complex val = B_diag_a[j];
         if (marker_diag[col] < diag_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = beta * val;
            nnz_diag_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_diag[col];
            hypre_assert(C_diag_j[p] == col);
            C_diag_a[p] += beta * val;
         }
      }
      C_diag_i[i+1] = nnz_diag_C;
      /* sequential runs have empty offd parts */
      if (num_procs <= 1)
      {
         continue;
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int     colA = A_offd_j[j];
         HYPRE_Int     colC = A2C_offd[colA];
         HYPRE_Complex val  = A_offd_a[j];
         if (marker_offd[colC] < offd_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = alpha * val;
            nnz_offd_C ++;
         }
         else
         {
            /* this should not happen */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }
      for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
      {
         HYPRE_Int     colB = B_offd_j[j];
         HYPRE_Int     colC = B2C_offd[colB];
         HYPRE_Complex val  = B_offd_a[j];
         if (marker_offd[colC] < offd_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = beta * val;
            nnz_offd_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_offd[colC];
            hypre_assert(C_offd_j[p] == colC);
            C_offd_a[p] += beta * val;
         }
      }
      C_offd_i[i+1] = nnz_offd_C;
   }
   /* C inherits A's row/col partitioning (two entries per starts array) */
   j = 2;
   row_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
   memcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));
   /* Now, we should have everything of Parcsr matrix C */
   C = hypre_ParCSRMatrixCreate(comm,
                                nrow_global,
                                ncol_global,
                                row_starts_C,
                                col_starts_C,
                                num_cols_C_offd,
                                nnz_diag_C,
                                nnz_offd_C);
   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_a;
   hypre_CSRMatrixI(C_diag)    = C_diag_i;
   hypre_CSRMatrixJ(C_diag)    = C_diag_j;
   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixData(C_offd) = C_offd_a;
   hypre_CSRMatrixI(C_offd)    = C_offd_i;
   hypre_CSRMatrixJ(C_offd)    = C_offd_j;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;
   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
   hypre_ParCSRMatrixSetNumNonzeros(C);
   hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);
   /* create CommPkg of C */
   hypre_MatvecCommPkgCreate(C);
   *Cout = C;
   /* done */
   hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Frobenius norm of a ParCSR matrix: local diag/offd norms are squared,
 * summed over all ranks with an Allreduce, and the square root returned. */
HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Real f_diag, f_offd, local_result, result;
   f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
   f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
   local_result = f_diag * f_diag + f_offd * f_offd;
   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
   return sqrt(result);
}

/* Start the "reverse" exchange: each rank sends the rows of B_ext it
 * received earlier back to their owners (B_int).  Uses comm_pkg_A with
 * the send/recv roles swapped, as in a transpose matvec. */
HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext,
                                hypre_ParCSRCommPkg *comm_pkg_A,
                                void **request_ptr)
{
   MPI_Comm   comm            = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int  num_elmts_send  = send_map_starts[num_sends];
   HYPRE_Int  num_elmts_recv  = recv_vec_starts[num_recvs];
   HYPRE_Int     *B_ext_i     = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
   HYPRE_BigInt  *B_ext_j     = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
   HYPRE_Complex *B_ext_data  = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
   HYPRE_Int      B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
   HYPRE_Int      B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
   HYPRE_Int     *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
   hypre_assert(num_elmts_recv == B_ext_nrows);
   /* output matrix */
   hypre_CSRMatrix *B_int;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i     = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_BigInt    *B_int_j     = NULL;
   HYPRE_Complex   *B_int_data  = NULL;
   HYPRE_Int        B_int_nnz;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i;
   HYPRE_Int num_procs;
   void **vrequest;
   hypre_MPI_Comm_size(comm, &num_procs);
   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   for (i = 0; i < B_ext_nrows; i++)
   {
      B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
   }
   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);
   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
   }
   /* build a comm package with send/recv roles swapped vs comm_pkg_A */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs (prefix sum of received lengths)
    *--------------------------------------------------------------------------*/
   B_int_i[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i[i] += B_int_i[i-1];
   }
   B_int_nnz = B_int_i[B_int_nrows];
   B_int_j    = hypre_TAlloc(HYPRE_BigInt,  B_int_nnz, HYPRE_MEMORY_HOST);
   B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);
   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
   }
   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;
   /* send/recv CSR rows */
   comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);
   /* create CSR */
   B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_int)    = B_int_i;
   hypre_CSRMatrixBigJ(B_int) = B_int_j;
   hypre_CSRMatrixData(B_int) = B_int_data;
   /* output: pending handles + result packed for the matching Wait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int;
   vrequest[3] = (void *) comm_pkg_j;
   *request_ptr = (void *) vrequest;
   hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Finish the exchange started by hypre_ExchangeExternalRowsInit and
 * return the assembled B_int matrix; frees the temporary comm package. */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;
   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *B_int         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];
   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_a);
   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   hypre_TFree(request, HYPRE_MEMORY_HOST);
   return B_int;
}

/* -----------------------------------------------------------------------------
 * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
 * char job[2] = "FF", "FC", "CF" or "CC"
 * ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix  *A,
                                      HYPRE_Int           *CF_marker,
                                      HYPRE_BigInt        *cpts_starts_in,
                                      const char          *job,
                                      hypre_ParCSRMatrix **B_ptr,
                                      HYPRE_Real           strength_thresh)
{
   MPI_Comm                comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   /* hypre_ParCSRMatrixColMapOffd(A); -- tail of the commented-out decl above */
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix    *B_diag, *B_offd;
   HYPRE_Real         *B_maxel_row;
   HYPRE_Int          *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
   HYPRE_Complex      *B_diag_a, *B_offd_a;
   HYPRE_Int           num_cols_B_offd;
   HYPRE_BigInt       *col_map_offd_B;
   HYPRE_Int           i, j, k, k1, k2;
   HYPRE_BigInt        B_nrow_global, B_ncol_global;
   HYPRE_Int           A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd;
   HYPRE_BigInt        total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts;
   HYPRE_Int           nf_local, nc_local;
   HYPRE_Int           row_set, col_set;
   HYPRE_BigInt       *B_row_starts, *B_col_starts, B_first_col;
   HYPRE_Int           my_id, num_procs, *sub_idx_diag, *sub_idx_offd;
   HYPRE_Int           num_sends, *send_buf_data;
   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* -1 selects F points (CF_marker < 0), +1 selects C points */
   row_set = job[0] == 'F' ? -1 : 1;
   col_set = job[1] == 'F' ? -1 : 1;
   A_nlocal = hypre_CSRMatrixNumRows(A_diag);
   /*-------------- global number of C points and local C points
    *               assuming cpts_starts is given */
   if (row_set == 1 || col_set == 1)
   {
      /* copy cpts_starts first */
      HYPRE_Int len;
      len = 2;
      cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
      memcpy(cpts_starts, cpts_starts_in, len*sizeof(HYPRE_BigInt));
      if (my_id == (num_procs -1))
      {
         total_global_cpts = cpts_starts[1];
      }
      /* NOTE(review): total_global_cpts is HYPRE_BigInt but is broadcast as
       * HYPRE_MPI_INT -- looks wrong when hypre is built with big integers;
       * verify against HYPRE_MPI_BIG_INT usage elsewhere */
      hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
      nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
   }
   /*-------------- global number of F points, local F points, and F starts */
   if (row_set == -1 || col_set == -1)
   {
      nf_local = 0;
      for (i = 0; i < A_nlocal; i++)
      {
         if (CF_marker[i] < 0)
         {
            nf_local++;
         }
      }
      fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      /* NOTE(review): nf_local is HYPRE_Int but the Scan datatype is
       * HYPRE_MPI_BIG_INT -- type/size mismatch when BigInt != Int; confirm */
      hypre_MPI_Scan(&nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      fpts_starts[0] = fpts_starts[1] - nf_local;
      if (my_id == num_procs - 1)
      {
         total_global_fpts = fpts_starts[1];
      }
      /* NOTE(review): same HYPRE_MPI_INT vs HYPRE_BigInt concern as above */
      hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
   }
   if (row_set == -1 && col_set == -1)
   {
      /* FF */
      B_nrow_local = nf_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = B_col_starts = fpts_starts;
   }
   else if (row_set == -1 && col_set == 1)
   {
      /* FC */
      B_nrow_local = nf_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = fpts_starts;
      B_col_starts = cpts_starts;
   }
   else if (row_set == 1 && col_set == -1)
   {
      /* CF */
      B_nrow_local = nc_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = cpts_starts;
      B_col_starts = fpts_starts;
   }
   else
   {
      /* CC */
      B_nrow_local = nc_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = B_col_starts = cpts_starts;
   }
   /* global index of my first col */
   B_first_col = B_col_starts[0];
   /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
   sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i == col_set)
      {
         sub_idx_diag[i] = k++;
      }
      else
      {
         sub_idx_diag[i] = -1;
      }
   }
   hypre_assert(k == B_ncol_local);
   /* exchange the selected-column mapping with neighbors so that offd
    * columns can also be renumbered into B's column space */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   k = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
      /* loop through all elems to send_proc[i] */
      for (j = si; j < ei; j++)
      {
         /* j1: local idx */
         HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         if (j1 != -1)
         {
            /* adjust j1 to B global idx */
            j1 += B_first_col;
         }
         send_buf_data[k++] = j1;
      }
   }
   hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
   /* recv buffer */
   sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* count B's offd columns (those whose owner selected them) */
   for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         num_cols_B_offd ++;
      }
   }
   /* col_map_offd_B keeps the received global index; sub_idx_offd is
    * compressed in place to B's local offd numbering */
   col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         col_map_offd_B[k] = sub_idx_offd[i];
         sub_idx_offd[i] = k++;
      }
   }
   hypre_assert(k == num_cols_B_offd);
   /* count nnz and set ia */
   B_nnz_diag = B_nnz_offd = 0;
   B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
   B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST);
   B_diag_i[0] = B_offd_i[0] = 0;
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      k++;
      // Get max abs-value element of this row
      HYPRE_Real temp_max = 0;
      if (strength_thresh > 0) {
         /* skip the first diag entry (the diagonal) when taking the max */
         for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
         {
            if (hypre_cabs(A_diag_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_diag_a[j]);
            }
         }
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            if (hypre_cabs(A_offd_a[j]) > temp_max)
            {
               temp_max = hypre_cabs(A_offd_a[j]);
            }
         }
      }
      B_maxel_row[k-1] = temp_max;
      // add one for diagonal element
      j = A_diag_i[i];
      if (sub_idx_diag[A_diag_j[j]] != -1)
      {
         B_nnz_diag++;
      }
      // Count nnzs larger than tolerance times max row element
      for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++)
      {
         if ( (sub_idx_diag[A_diag_j[j]] != -1) &&
              (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_diag++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         if ( (sub_idx_offd[A_offd_j[j]] != -1) &&
              (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) )
         {
            B_nnz_offd++;
         }
      }
      B_diag_i[k] = B_nnz_diag;
      B_offd_i[k] = B_nnz_offd;
   }
   hypre_assert(k == B_nrow_local);
   B_diag_j = hypre_TAlloc(HYPRE_Int,     B_nnz_diag, HYPRE_MEMORY_HOST);
   B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_offd_j = hypre_TAlloc(HYPRE_Int,     B_nnz_offd, HYPRE_MEMORY_HOST);
   B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);
   /* second pass: fill ja and a using the same selection rules */
   for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set)
      {
         continue;
      }
      HYPRE_Real maxel = B_maxel_row[k];
      k++;
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
         /* the first entry of the row (the diagonal) is always kept */
         if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) )
         {
            B_diag_j[k1] = j1;
            B_diag_a[k1] = A_diag_a[j];
            k1++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
         if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel)))
         {
            hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
            B_offd_j[k2] = j1;
            B_offd_a[k2] = A_offd_a[j];
            k2++;
         }
      }
   }
   hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);
   /* ready to create B = A(rowset, colset) */
   B = hypre_ParCSRMatrixCreate(comm,
                                B_nrow_global,
                                B_ncol_global,
                                B_row_starts,
                                B_col_starts,
                                num_cols_B_offd,
                                B_nnz_diag,
                                B_nnz_offd);
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_diag) = B_diag_a;
   hypre_CSRMatrixI(B_diag)    = B_diag_i;
   hypre_CSRMatrixJ(B_diag)    = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_offd) = B_offd_a;
   hypre_CSRMatrixI(B_offd)    = B_offd_i;
   hypre_CSRMatrixJ(B_offd)    = B_offd_j;
   hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
   hypre_ParCSRMatrixSetNumNonzeros(B);
   hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);
   hypre_MatvecCommPkgCreate(B);
   *B_ptr = B;
   hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
   hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
clauses-5.c
/* Negative OpenMP test: every clause below is given a stray second,
   comma-separated argument, which the parser must reject.  Each construct
   expects an "expected ..." diagnostic; the dg-error comments are DejaGnu
   directives and must stay on the same line as their directive. */
void
foo (int *p)
{
  int i, j = 0;
  #pragma omp parallel if (2, 1)	/* { dg-error "expected" } */
  ;
  #pragma omp parallel num_threads (3, 4)	/* { dg-error "expected" } */
  ;
  #pragma omp teams num_teams (4, 5)	/* { dg-error "expected" } */
  ;
  #pragma omp teams thread_limit (6, 7)	/* { dg-error "expected" } */
  ;
  #pragma omp for linear (j : 8, 9)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    j += (8, 9);	/* comma expression keeps the linear step's shape */
  #pragma omp for schedule (static, 3, 4)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp for collapse (1, 1)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp for ordered (1, 1)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp simd safelen (3, 4)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp simd simdlen (4, 8)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp simd aligned (p: 4, 8)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp teams
  #pragma omp distribute dist_schedule (static, 6, 7)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp task final (8, 1)	/* { dg-error "expected" } */
  ;
  #pragma omp task priority (2, 3)	/* { dg-error "expected" } */
  ;
  #pragma omp taskloop grainsize (4, 5)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp taskloop num_tasks (5, 6)	/* { dg-error "expected" } */
  for (i = 0; i < 30; i++)
    ;
  #pragma omp target device (5, 1)	/* { dg-error "expected" } */
  ;
  #pragma omp critical (baz) hint (2, 3)	/* { dg-error "expected" } */
  ;
}
sparse-new.c
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                       */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include <omp.h>
/* maximum tolerated relative error when comparing blocks */
#define EPSILON 1.0E-6
/* bots_arg_size:   matrix dimension in blocks; bots_arg_size_1: block dimension */
unsigned int bots_arg_size = 50;
unsigned int bots_arg_size_1 = 80;
#define TRUE 1
#define FALSE 0
#define BOTS_RESULT_SUCCESSFUL 1
#define BOTS_RESULT_UNSUCCESSFUL 0
/***********************************************************************
 * checkmat: compare two dense blocks element-wise; returns TRUE when
 * every entry of N is within EPSILON relative error of M, FALSE (and a
 * message on stderr) otherwise.
 * NOTE(review): the relative error divides by M's entry -- a zero entry
 * in the reference block yields inf/nan; confirm inputs are nonzero.
 **********************************************************************/
int checkmat (float *M, float *N)
{
   int i, j;
   float r_err;
   int bad = 0;
   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j];
         if (r_err < 0.0 ) r_err = -r_err;
         r_err = r_err / M[i*bots_arg_size_1+j];
         if(r_err > EPSILON)
         {
            fprintf(stderr,"Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
                    i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
            bad = 1;
         }
      }
   }
   return bad ? FALSE : TRUE;
}
/***********************************************************************
 * genmat: build the sparse block structure and fill the non-null blocks
 * with a deterministic pseudo-random sequence (seed 1325, LCG mod 65536).
 **********************************************************************/
void genmat (float *M[])
{
   int null_entry, init_val, i, j, ii, jj;
   float *p;
   init_val = 1325;
   /* generating the structure */
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* computing null entries */
         null_entry=FALSE;
         if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
         if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
         if (ii%2==1) null_entry = TRUE;
         if (jj%2==1) null_entry = TRUE;
         if (ii==jj) null_entry = FALSE;
         if (ii==jj-1) null_entry = FALSE;
         if (ii-1 == jj) null_entry = FALSE;
         /* allocating matrix */
         if (null_entry == FALSE){
            M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
            if ((M[ii*bots_arg_size+jj] == NULL))
            {
               fprintf(stderr,"Error: Out of memory\n");
               exit(101);
            }
            /* initializing matrix */
            p = M[ii*bots_arg_size+jj];
            for (i = 0; i < bots_arg_size_1; i++)
            {
               for (j = 0; j < bots_arg_size_1; j++)
               {
                  init_val = (3125 * init_val) % 65536;
                  (*p) = (float)((init_val - 32768.0) / 16384.0);
                  p++;
               }
            }
         }
         else
         {
            M[ii*bots_arg_size+jj] = NULL;
         }
      }
   }
}
/***********************************************************************
 * print_structure: draw the block sparsity pattern on stderr
 * ('x' = allocated block, ' ' = NULL block).
 **********************************************************************/
void print_structure(char *name, float *M[])
{
   int ii, jj;
   fprintf(stderr,"Structure for matrix %s @ 0x%p\n",name, M);
   for (ii = 0; ii < bots_arg_size; ii++) {
      for (jj = 0; jj < bots_arg_size; jj++) {
         if (M[ii*bots_arg_size+jj]!=NULL) {fprintf(stderr,"x");}
         else fprintf(stderr," ");
      }
      fprintf(stderr,"\n");
   }
   fprintf(stderr,"\n");
}
/***********************************************************************
 * allocate_clean_block:
**********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { fprintf(stderr,"Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { 
int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_par_call(float **BENCH) { int ii, jj, kk; fprintf(stderr,"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel #pragma omp single { double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { #pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk]) lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk]) fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) #pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk]) bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj]) bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } #pragma omp taskwait double d2 = omp_get_wtime(); fprintf(stderr," Par Time: %f\n",d2-d1); } fprintf(stderr," 
completed!\n"); } void sparselu_seq_call(float **BENCH) { int ii, jj, kk; double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } double d2 = omp_get_wtime(); fprintf(stderr,"Serial Time: %f\n",d2-d1); } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); if(!ok)abort(); } } if (ok) fprintf(stderr,"stämmer\n"); if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; } int main ( int argc, char *argv[]) { float **SEQ,**BENCH; sparselu_init(&BENCH,"benchmark"); sparselu_par_call(BENCH); sparselu_fini(BENCH,"benchmark"); sparselu_init(&SEQ,"serial"); sparselu_seq_call(SEQ); sparselu_fini(SEQ,"serial"); fprintf(stderr,"Testar om Parallel och Seriell version stämmer med varandra...\n"); return (sparselu_check(SEQ,BENCH) == BOTS_RESULT_SUCCESSFUL) ? 0 : 1; }
private-clauseModificado.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * Demonstrates the OpenMP private() clause: each thread receives its
 * own copy of 'suma', accumulates its share of a[] under the worksharing
 * loop, and prints its partial sum. Fixes vs. the original:
 *  - 'main()' with implicit int is invalid since C99 -> 'int main(void)'
 *    with an explicit return.
 *  - private() copies start UNINITIALIZED regardless of the initializer
 *    on the declaration (the original had 'suma = 0;' commented out and
 *    read garbage) -> each thread now zeroes its copy first.
 */
int main(void)
{
   int i, n = 7;
   int a[n], suma = 0;

   for (i = 0; i < n; i++) a[i] = i;

   #pragma omp parallel private(suma)
   {
      suma = 0;   /* required: the private copy is uninitialized on entry */
      #pragma omp for
      for (i = 0; i < n; i++)
      {
         suma = suma + a[i];
         printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
      }
      printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
   }
   printf("\n");
   return 0;
}
hypre_merge_sort.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"

//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <algorithm>
#include <unordered_map>
#endif

/* Classic three-statement swap; T is spelled out because a1/a2 may be
 * value or pointer types. */
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)

/*--------------------------------------------------------------------------
 * hypre_MergeOrderedArrays: merge two ordered arrays into a newly
 * allocated third array, dropping duplicates that appear in both inputs
 * (equal heads advance both cursors but emit one element).
 * The result is shrunk to its final size k via TReAlloc.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MergeOrderedArrays( HYPRE_Int  size1,     HYPRE_Int  *array1,
                          HYPRE_Int  size2,     HYPRE_Int  *array2,
                          HYPRE_Int *size3_ptr, HYPRE_Int **array3_ptr )
{
   HYPRE_Int *array3;
   HYPRE_Int  i, j, k;

   /* Worst case (no duplicates) needs size1 + size2 slots. */
   array3 = hypre_CTAlloc(HYPRE_Int, (size1 + size2), HYPRE_MEMORY_HOST);

   i = j = k = 0;
   while (i < size1 && j < size2)
   {
      if (array1[i] > array2[j])
      {
         array3[k++] = array2[j++];
      }
      else if (array1[i] < array2[j])
      {
         array3[k++] = array1[i++];
      }
      else
      {
         /* Equal entry in both inputs: emit once, advance both. */
         array3[k++] = array1[i++];
         j++;
      }
   }
   /* Copy whichever input has a tail left. */
   while (i < size1)
   {
      array3[k++] = array1[i++];
   }
   while (j < size2)
   {
      array3[k++] = array2[j++];
   }

   /* Set pointers; shrink the buffer to the k entries actually used. */
   *size3_ptr  = k;
   *array3_ptr = hypre_TReAlloc(array3, HYPRE_Int, k, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_union2
 *
 * Union of two sorted (in ascending order) array arr1 and arr2 into arr3
 *
 * Assumptions:
 * 1) no duplicate entries in arr1 and arr2. But an entry is
 *    allowed to appear in both arr1 and arr2
 * 2) arr3 should have enough space on entry
 * 3) map1 and map2 map arr1 and arr2 to arr3 (each is optional: skipped
 *    when NULL)
 *--------------------------------------------------------------------------*/
void hypre_union2( HYPRE_Int n1, HYPRE_BigInt *arr1,
                   HYPRE_Int n2, HYPRE_BigInt *arr2,
                   HYPRE_Int *n3, HYPRE_BigInt *arr3,
                   HYPRE_Int *map1, HYPRE_Int *map2 )
{
   HYPRE_Int i = 0, j = 0, k = 0;
   while (i < n1 && j < n2)
   {
      if (arr1[i] < arr2[j])
      {
         if (map1) { map1[i] = k; }
         arr3[k++] = arr1[i++];
      }
      else if (arr1[i] > arr2[j])
      {
         if (map2) { map2[j] = k; }
         arr3[k++] = arr2[j++];
      }
      else /* == : element present in both; emit once, record both maps */
      {
         if (map1) { map1[i] = k; }
         if (map2) { map2[j] = k; }
         arr3[k++] = arr1[i++];
         j++;
      }
   }
   while (i < n1)
   {
      if (map1) { map1[i] = k; }
      arr3[k++] = arr1[i++];
   }
   while (j < n2)
   {
      if (map2) { map2[j] = k; }
      arr3[k++] = arr2[j++];
   }
   *n3 = k;
}

/*--------------------------------------------------------------------------
 * hypre_merge: sequential two-way merge of [first1,last1) and
 * [first2,last2) into out (stable: ties favor the first range).
 * Duplicates are KEPT, unlike hypre_MergeOrderedArrays.
 *--------------------------------------------------------------------------*/
static void hypre_merge( HYPRE_Int *first1, HYPRE_Int *last1,
                         HYPRE_Int *first2, HYPRE_Int *last2,
                         HYPRE_Int *out )
{
   for ( ; first1 != last1; ++out)
   {
      if (first2 == last2)
      {
         /* Second range exhausted: bulk-copy the remainder of range 1. */
         for ( ; first1 != last1; ++first1, ++out)
         {
            *out = *first1;
         }
         return;
      }
      if (*first2 < *first1)
      {
         *out = *first2;
         ++first2;
      }
      else
      {
         *out = *first1;
         ++first1;
      }
   }
   /* First range exhausted: copy the rest of range 2. */
   for ( ; first2 != last2; ++first2, ++out)
   {
      *out = *first2;
   }
}

/*--------------------------------------------------------------------------
 * hypre_big_merge: HYPRE_BigInt twin of hypre_merge (same algorithm).
 *--------------------------------------------------------------------------*/
static void hypre_big_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1,
                             HYPRE_BigInt *first2, HYPRE_BigInt *last2,
                             HYPRE_BigInt *out )
{
   for ( ; first1 != last1; ++out)
   {
      if (first2 == last2)
      {
         for ( ; first1 != last1; ++first1, ++out)
         {
            *out = *first1;
         }
         return;
      }
      if (*first2 < *first1)
      {
         *out = *first2;
         ++first2;
      }
      else
      {
         *out = *first1;
         ++first1;
      }
   }
   for ( ; first2 != last2; ++first2, ++out)
   {
      *out = *first2;
   }
}

/*--------------------------------------------------------------------------
 * kth_element_: binary-search worker for kth_element. Searches the split
 * index i in a1 (within [left,right]) such that taking a1[0:i) and
 * a2[0:k-i) yields the k smallest elements overall; j = k - i - 1 is the
 * matching last index taken from a2.
 *--------------------------------------------------------------------------*/
static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2,
                          HYPRE_Int *a1, HYPRE_Int *a2,
                          HYPRE_Int left, HYPRE_Int right,
                          HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   while (1)
   {
      HYPRE_Int i = (left + right)/2; // right < k -> i < k
      HYPRE_Int j = k - i - 1;

#ifdef DBG_MERGE_SORT
      hypre_assert(left <= right && right <= k);
      hypre_assert(i < k); // i == k implies left == right == k that can never happen
      hypre_assert(j >= 0 && j < n2);
#endif

      if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
      {
         /* a1[i] sits between a2[j] and a2[j+1]: valid split found. */
         *out1 = i;
         *out2 = j + 1;
         return;
      }
      else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
      {
         /* Symmetric case: a2[j] sits between a1[i] and a1[i+1]. */
         *out1 = i + 1;
         *out2 = j;
         return;
      }
      else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j + 1])
      {
         // search in left half of a1
         right = i - 1;
      }
      else
      {
         // search in right half of a1
         left = i + 1;
      }
   }
}

/*--------------------------------------------------------------------------
 * kth_element
 *
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements.
 * Handles degenerate cases directly, then binary-searches the shorter
 * array; for large k it searches for the (n1+n2-k)th largest instead.
 *--------------------------------------------------------------------------*/
static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2,
                         HYPRE_Int *a1, HYPRE_Int *a2,
                         HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   // either of the inputs is empty
   if (n1 == 0)
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (n2 == 0)
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k >= n1 + n2)
   {
      *out1 = n1; *out2 = n2;
      return;
   }

   // one is greater than the other
   if (k < n1 && a1[k] <= a2[0])
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
   {
      *out1 = n1; *out2 = k - n1;
      return;
   }
   if (k < n2 && a2[k] <= a1[0])
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
   {
      *out1 = k - n2; *out2 = n2;
      return;
   }
   // now k > 0

   // faster to do binary search on the shorter sequence
   if (n1 > n2)
   {
      SWAP(HYPRE_Int, n1, n2);
      SWAP(HYPRE_Int *, a1, a2);
      SWAP(HYPRE_Int *, out1, out2);
   }

   if (k < (n1 + n2)/2)
   {
      kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
   }
   else
   {
      // when k is big, faster to find (n1 + n2 - k)th biggest element
      HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
      HYPRE_Int new_k = k - offset1 - offset2;

      HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
      HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
      kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);

      *out1 += offset1;
      *out2 += offset2;
   }
#ifdef DBG_MERGE_SORT
   hypre_assert(*out1 + *out2 == k);
#endif
}

/*--------------------------------------------------------------------------
 * big_kth_element_: HYPRE_BigInt twin of kth_element_ (identical logic;
 * only the element type of a1/a2 differs).
 *--------------------------------------------------------------------------*/
static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2,
                              HYPRE_BigInt *a1, HYPRE_BigInt *a2,
                              HYPRE_Int left, HYPRE_Int right,
                              HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   while (1)
   {
      HYPRE_Int i = (left + right)/2; // right < k -> i < k
      HYPRE_Int j = k - i - 1;

#ifdef DBG_MERGE_SORT
      hypre_assert(left <= right && right <= k);
      hypre_assert(i < k); // i == k implies left == right == k that can never happen
      hypre_assert(j >= 0 && j < n2);
#endif

      if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
      {
         *out1 = i;
         *out2 = j + 1;
         return;
      }
      else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
      {
         *out1 = i + 1;
         *out2 = j;
         return;
      }
      else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j + 1])
      {
         // search in left half of a1
         right = i - 1;
      }
      else
      {
         // search in right half of a1
         left = i + 1;
      }
   }
}

/*--------------------------------------------------------------------------
 * big_kth_element
 *
 * Partition the input so that
 * a1[0:*out1) and a2[0:*out2) contain the smallest k elements
 * (HYPRE_BigInt twin of kth_element).
 *--------------------------------------------------------------------------*/
static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2,
                             HYPRE_BigInt *a1, HYPRE_BigInt *a2,
                             HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
   // either of the inputs is empty
   if (n1 == 0)
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (n2 == 0)
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k >= n1 + n2)
   {
      *out1 = n1; *out2 = n2;
      return;
   }

   // one is greater than the other
   if (k < n1 && a1[k] <= a2[0])
   {
      *out1 = k; *out2 = 0;
      return;
   }
   if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
   {
      *out1 = n1; *out2 = k - n1;
      return;
   }
   if (k < n2 && a2[k] <= a1[0])
   {
      *out1 = 0; *out2 = k;
      return;
   }
   if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
   {
      *out1 = k - n2; *out2 = n2;
      return;
   }
   // now k > 0

   // faster to do binary search on the shorter sequence
   if (n1 > n2)
   {
      SWAP(HYPRE_Int, n1, n2);
      SWAP(HYPRE_BigInt *, a1, a2);
      SWAP(HYPRE_Int *, out1, out2);
   }

   if (k < (n1 + n2)/2)
   {
      big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
   }
   else
   {
      // when k is big, faster to find (n1 + n2 - k)th biggest element
      HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
      HYPRE_Int new_k = k - offset1 - offset2;

      HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
      HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
      big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);

      *out1 += offset1;
      *out2 += offset2;
   }
#ifdef DBG_MERGE_SORT
   hypre_assert(*out1 + *out2 == k);
#endif
}

/*--------------------------------------------------------------------------
 * hypre_parallel_merge
 *
 * Cooperative merge: each participating thread computes (via kth_element)
 * the slice of the merged output it is responsible for, then merges just
 * that slice sequentially. The while loops nudge the partition boundaries
 * across runs of duplicate values so the two halves of a tie are not
 * split inconsistently between threads.
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that
 *                      participate in this merge
 *--------------------------------------------------------------------------*/
static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1,
                                  HYPRE_Int *first2, HYPRE_Int *last2,
                                  HYPRE_Int *out,
                                  HYPRE_Int num_threads, HYPRE_Int my_thread_num )
{
   HYPRE_Int n1 = last1 - first1;
   HYPRE_Int n2 = last2 - first2;
   HYPRE_Int n = n1 + n2;
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(first1, last1));
   hypre_assert(std::is_sorted(first2, last2));
#endif

   HYPRE_Int begin1, begin2, end1, end2;
   kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   /* Re-balance across duplicate values at the slice boundaries. */
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--;
      begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--;
      end2++;
   }

#ifdef DBG_MERGE_SORT
   hypre_assert(begin1 <= end1);
   hypre_assert(begin2 <= end2);
#endif

   hypre_merge(
      first1 + begin1, first1 + end1,
      first2 + begin2, first2 + end2,
      out + begin1 + begin2);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}

/*--------------------------------------------------------------------------
 * hypre_big_parallel_merge: HYPRE_BigInt twin of hypre_parallel_merge.
 *
 * @param num_threads number of threads that participate in this merge
 * @param my_thread_num thread id (zero-based) among the threads that
 *                      participate in this merge
 *--------------------------------------------------------------------------*/
static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1,
                                      HYPRE_BigInt *first2, HYPRE_BigInt *last2,
                                      HYPRE_BigInt *out,
                                      HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
   HYPRE_Int n1 = (HYPRE_Int)(last1 - first1);
   HYPRE_Int n2 = (HYPRE_Int)(last2 - first2);
   HYPRE_Int n = n1 + n2;
   HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
   HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
   HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(first1, last1));
   hypre_assert(std::is_sorted(first2, last2));
#endif

   HYPRE_Int begin1, begin2, end1, end2;
   big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
   big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);

   /* Re-balance across duplicate values at the slice boundaries. */
   while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      begin1--;
      begin2++;
   }
   while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
   {
#ifdef DBG_MERGE_SORT
      printf("%s:%d\n", __FILE__, __LINE__);
#endif
      end1--;
      end2++;
   }

#ifdef DBG_MERGE_SORT
   hypre_assert(begin1 <= end1);
   hypre_assert(begin2 <= end2);
#endif

   hypre_big_merge(
      first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1,
      first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2,
      out + (HYPRE_BigInt)(begin1 + begin2));

#ifdef DBG_MERGE_SORT
   hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}

/*--------------------------------------------------------------------------
 * hypre_merge_sort
 *
 * Parallel merge sort: each thread qsorts its own chunk of 'in', then
 * chunks are pairwise merged (group size doubling each round), ping-
 * ponging between 'in' and the caller-provided scratch buffer 'temp'.
 * On return *out aliases whichever of in/temp holds the sorted data --
 * the caller must NOT assume *out == in.
 *--------------------------------------------------------------------------*/
void hypre_merge_sort( HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out )
{
   if (0 == len) return;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   HYPRE_Int *dbg_buf = new HYPRE_Int[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_qsort0(in, i_begin, i_end - 1);

      // merge sorted sequences
      HYPRE_Int in_group_size;
      HYPRE_Int *in_buf = in;
      HYPRE_Int *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);

         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_parallel_merge(
            in_buf + in_group1_begin, in_buf + in_group1_end,
            in_buf + in_group2_begin, in_buf + in_group2_end,
            out_buf + in_group1_begin,
            num_threads_in_group,
            id_in_group);

         /* Ping-pong buffers (NOTE: this local 'temp' deliberately
          * shadows the parameter of the same name). */
         HYPRE_Int *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   hypre_assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}

/*--------------------------------------------------------------------------
 * hypre_sort_and_create_inverse_map
 *
 * Sort array "in" with length len and put result in array "out"
 * "in" will be deallocated unless in == *out
 * inverse_map is an inverse hash table s.t.
 * inverse_map[i] = j iff (*out)[j] = i
 *--------------------------------------------------------------------------*/
void hypre_sort_and_create_inverse_map(HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out,
                                       hypre_UnorderedIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST);
   hypre_merge_sort(in, temp, len, out);
   /* 2*len buckets to keep the load factor low for hopscotch probing. */
   hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < len; i++)
   {
      /* Assumes sorted values are distinct: insert must not find a
       * pre-existing key. */
      HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i)
   {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
   }
   hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif

   /* merge_sort ping-pongs: free whichever buffer does NOT hold the result. */
   if (*out == in)
   {
      hypre_TFree(temp, HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(in, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}

/*--------------------------------------------------------------------------
 * hypre_big_merge_sort: HYPRE_BigInt twin of hypre_merge_sort (same
 * chunk-qsort + pairwise-merge scheme; see hypre_merge_sort for details).
 *--------------------------------------------------------------------------*/
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len,
                          HYPRE_BigInt **out)
{
   if (0 == len) return;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

#ifdef DBG_MERGE_SORT
   /* NOTE(review): debug buffer is HYPRE_Int while 'in' is HYPRE_BigInt --
    * looks like a narrowing copy inherited from the non-big version;
    * confirm before enabling DBG_MERGE_SORT with big indices. */
   HYPRE_Int *dbg_buf = new HYPRE_Int[len];
   std::copy(in, in + len, dbg_buf);
   std::sort(dbg_buf, dbg_buf + len);
#endif

   // HYPRE_Int thread_private_len[hypre_NumThreads()];
   // HYPRE_Int out_len = 0;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      // thread-private sort
      HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
      HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
      HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);

      hypre_BigQsort0(in, i_begin, i_end - 1);

      // merge sorted sequences
      HYPRE_Int in_group_size;
      HYPRE_BigInt *in_buf = in;
      HYPRE_BigInt *out_buf = temp;
      for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         // merge 2 in-groups into 1 out-group
         HYPRE_Int out_group_size = in_group_size*2;
         HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
         // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
         HYPRE_Int id_in_group = my_thread_num%out_group_size;
         HYPRE_Int num_threads_in_group =
            hypre_min(group_leader + out_group_size, num_threads) - group_leader;

         HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
         HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);

         HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
         HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);

         hypre_big_parallel_merge(
            in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end,
            in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end,
            out_buf + (HYPRE_BigInt)in_group1_begin,
            num_threads_in_group,
            id_in_group);

         /* Ping-pong buffers (local 'temp' shadows the parameter). */
         HYPRE_BigInt *temp = in_buf;
         in_buf = out_buf;
         out_buf = temp;
      }

      *out = in_buf;
   } /* omp parallel */

#ifdef DBG_MERGE_SORT
   hypre_assert(std::equal(*out, *out + len, dbg_buf));

   delete[] dbg_buf;
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}

/*--------------------------------------------------------------------------
 * hypre_big_sort_and_create_inverse_map: HYPRE_BigInt twin of
 * hypre_sort_and_create_inverse_map (same contract: sorts "in", returns
 * result via *out, frees the losing buffer, builds value->index map).
 *--------------------------------------------------------------------------*/
void hypre_big_sort_and_create_inverse_map(HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out,
                                           hypre_UnorderedBigIntMap *inverse_map)
{
   if (len == 0)
   {
      return;
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif

   HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
   hypre_big_merge_sort(in, temp, len, out);
   hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
   HYPRE_Int i;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < len; i++)
   {
      HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i);
      hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
      if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
#endif
   }

#ifdef DBG_MERGE_SORT
   std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
   for (HYPRE_Int i = 0; i < len; ++i)
   {
      inverse_map2[(*out)[i]] = i;
      if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
      {
         fprintf(stderr, "%d %d\n", i, (*out)[i]);
         hypre_assert(false);
      }
   }
   hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len);
#endif

   if (*out == in)
   {
      hypre_TFree(temp, HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_TFree(in, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}

/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
GB_unaryop__one_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_fp32_fp32 // op(A') function: GB_tran__one_fp32_fp32 // C type: float // A type: float // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
atomic-13.c
/* { dg-do run } */
/* Runtime test for the OpenMP 3.1/4.0 'atomic' clause forms: read, write,
   capture (statement form), and capture (structured-block form, both
   capture-then-update and update-then-capture orderings).  main() seeds
   l = 77 and j = 88, so foo() checks an exact value trace; any mismatch
   aborts.  */

extern void abort (void);
long long l, m;
int i, j;

void foo (void)
{
  /* Atomic reads: i <- l (77), m <- j (88).  */
#pragma omp atomic read
  i = l;
#pragma omp atomic read
  m = j;
  if (i != 77 || m != 88)
    abort ();
  /* Atomic writes with constant-folded RHS: l = 84, j = 95.  */
#pragma omp atomic write
  l = 1 + i + 6 * 1;
#pragma omp atomic write
  j = 170 - 170 + m + 1 * 7;
  /* Statement-form capture: update then capture the NEW value.
     l: 84 -> 88, i = 88; j: 95 -> 99, m = 99.  */
#pragma omp atomic capture
  i = l += 4;
#pragma omp atomic capture
  m = j += 4;
  if (i != 88 || m != 99)
    abort ();
  /* Block-form capture, capture-then-update: i/m get the OLD values
     (88 / 99), then l -> 92, j -> 103.  */
#pragma omp atomic capture
  {
    i = l;
    l += 4;
  }
#pragma omp atomic capture
  {
    m = j;
    j += 4;
  }
  if (i != 88 || m != 99)
    abort ();
  /* Block-form capture, update-then-capture: l -> 96, i = 96;
     j -> 107, m = 107.  */
#pragma omp atomic capture
  {
    l += 4;
    i = l;
  }
#pragma omp atomic capture
  {
    j += 4;
    m = j;
  }
  if (i != 96 || m != 107)
    abort ();
}

int main ()
{
  /* Seed the globals foo() checks against.  */
  l = 77;
  j = 88;
  foo ();
  return 0;
}
FullyDistVec.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.2 -------------------------------------------------*/ /* date: 10/06/2011 --------------------------------------------*/ /* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/ /****************************************************************/ /* Copyright (c) 2011, Aydin Buluc Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_

#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <iterator>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"

// Forward declarations; the definitions live in their own headers.
template <class IT, class NT> class FullyDistSpVec;
template <class IT, class NT, class DER> class SpParMat;
template <class IT> class DistEdgeList;
template <class IU, class NU> class DenseVectorLocalIterator;

// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it can not be 32-bits, in general.

//! A dense vector distributed over the whole processor grid.
//! IT is the global index type, NT the numerical type. Boolean NT is excluded
//! from the FullyDist base via CombBLAS::disable_if (a boolean *dense* vector
//! is not meaningful; use the sparse variant instead).
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type >
{
public:
	FullyDistVec ( );
	FullyDistVec ( IT globallen, NT initval);
	FullyDistVec ( shared_ptr<CommGrid> grid);
	FullyDistVec ( shared_ptr<CommGrid> grid, IT globallen, NT initval);
	FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs );	// Sparse -> Dense conversion constructor
	FullyDistVec ( const vector<NT> & fillarr, shared_ptr<CommGrid> grid );	// initialize a FullyDistVec with a vector from each processor

	template <class ITRHS, class NTRHS>
	FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs );	// type converter constructor

	//! Default element handler for ReadDistribute/SaveGathered: moves NT values
	//! through operator>>/operator<<; getNoNum() supplies the value 1 when the
	//! input carries indices but no numerical entry.
	class ScalarReadSaveHandler
	{
	public:
		NT getNoNum(IT index) { return static_cast<NT>(1); }

		template <typename c, typename t>
		NT read(std::basic_istream<c,t>& is, IT index)
		{
			NT v;
			is >> v;
			return v;
		}

		template <typename c, typename t>
		void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
		{
			os << v;
		}
	};

	template <class HANDLER>
	ifstream& ReadDistribute (ifstream& infile, int master, HANDLER handler);
	// Convenience overload: scalar handler, read plain numeric entries.
	ifstream& ReadDistribute (ifstream& infile, int master)
	{ return ReadDistribute(infile, master, ScalarReadSaveHandler()); }

	template <class HANDLER>
	void SaveGathered(ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
	// Convenience overload: scalar handler, no per-processor split markers.
	void SaveGathered(ofstream& outfile, int master)
	{ SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }

	template <class ITRHS, class NTRHS>
	FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs);	// assignment with type conversion
	FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs);	//!< Actual assignment operator
	FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs);	//!< FullyDistSpVec->FullyDistVec conversion operator
	FullyDistVec<IT,NT> & operator=(const DenseParVec<IT,NT> & rhs);	//!< DenseParVec->FullyDistVec conversion operator
	FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const;	//<! subsref

	//! like operator=, but instead of making a deep copy it just steals the contents.
	//! Useful for places where the "victim" will be distroyed immediately after the call.
	FullyDistVec<IT,NT> & stealFrom(FullyDistVec<IT,NT> & victim);
	FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
	FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
	FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
	FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
	bool operator==(const FullyDistVec<IT,NT> & rhs) const;

	void SetElement (IT indx, NT numx);	// element-wise assignment (indx is a GLOBAL index)
	void SetLocalElement(IT index, NT value) { arr[index] = value; };	// no checks, local index
	NT GetElement (IT indx) const;	// element-wise fetch (global index)
	NT operator[](IT indx) const	// more c++ like API
	{ return GetElement(indx); }

	void Set(const FullyDistSpVec< IT,NT > & rhs);
	void iota(IT globalsize, NT first);
	void RandPerm();	// randomly permute the vector
	FullyDistVec<IT,IT> sort();	// sort and return the permutation

	// Re-export distribution helpers from the FullyDist base.
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::LengthUntil;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::TotalLength;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::Owner;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyLocLength;
	IT LocArrSize() const { return arr.size(); }	// = MyLocLength() once arr is resized

	template <typename _Predicate>
	FullyDistSpVec<IT,NT> Find(_Predicate pred) const;	//!< Return the elements for which pred is true

	template <typename _Predicate>
	FullyDistVec<IT,IT> FindInds(_Predicate pred) const;	//!< Return the indices where pred is true

	template <typename _Predicate>
	IT Count(_Predicate pred) const;	//!< Return the number of elements for which pred is true

	//! Apply __unary_op to every local element in place.
	template <typename _UnaryOperation>
	void Apply(_UnaryOperation __unary_op)
	{
		transform(arr.begin(), arr.end(), arr.begin(), __unary_op);
	}

	//! Apply __binary_op(element, global_index) to every local element in place.
	template <typename _BinaryOperation>
	void ApplyInd(_BinaryOperation __binary_op)
	{
		IT offset = LengthUntil();	// global index of this processor's first local element
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for(IT i=0; (unsigned)i < arr.size(); ++i)
			arr[i] = __binary_op(arr[i], i + offset);
	}

	template <typename _UnaryOperation, typename IRRELEVANT_NT>
	void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);

	// extended callback versions
	template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
	template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);

	// plain fallback versions: adapt plain functors to the extended callback API
	template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
	{
		EWiseApply(other,
				EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
				EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
				true);
	}
	template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
	void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
	{
		EWiseApply(other,
				EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
				EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
				applyNulls, nullValue, true);
	}

	//! Predicate that always answers true; used as the default _do_op
	//! so the predicate-less EWiseApply overloads touch every element.
	template <typename T1, typename T2>
	class retTrue
	{
	public:
		bool operator()(const T1& x, const T2& y)
		{
			return true;
		}
	};

	template <typename _BinaryOperation, class NT2>
	void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
	{
		this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
	}
	template <typename _BinaryOperation, class NT2>
	void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
	{
		this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
	}

	//! Dump the local slice, space-separated, to a per-processor debug file.
	void PrintToFile(string prefix)
	{
		ofstream output;
		commGrid->OpenDebugFile(prefix, output);
		copy(arr.begin(), arr.end(), ostream_iterator<NT> (output, " "));
		output << endl;
		output.close();
	}

	void PrintInfo(string vectorname) const;
	void DebugPrint();
	shared_ptr<CommGrid> getcommgrid() const { return commGrid; }

	template <typename _BinaryOperation>
	NT Reduce(_BinaryOperation __binary_op, NT identity);	//! Reduce can be used to implement max_element, for instance
	template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
	OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op);

	void SelectCandidates(double nver);

	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::glen;
	using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::commGrid;

private:
	vector< NT > arr;	// this processor's slice of the global vector

	template <typename _BinaryOperation>
	void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);

	// Friends that need direct access to arr / the distribution internals.
	template <class IU, class NU>
	friend class DenseParMat;

	template <class IU, class NU, class UDER>
	friend class SpParMat;

	template <class IU, class NU>
	friend class FullyDistVec;

	template <class IU, class NU>
	friend class FullyDistSpVec;

	template <class IU, class NU>
	friend class DenseVectorLocalIterator;

	template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
	friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
	SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );

	template <typename IU, typename NU1, typename NU2>
	friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
	EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);

	template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
	friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
	EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);

	template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
	friend FullyDistSpVec<IU,RET>
	EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);

	template <typename IU>
	friend void RenameVertices(DistEdgeList<IU> & DEL);

	template <typename IU, typename NU>
	friend FullyDistVec<IU,NU> Concatenate ( vector< FullyDistVec<IU,NU> > & vecs);
};

#include "FullyDistVec.cpp"
#endif
Cylinder.h
#ifndef CYLINDER_HEADER
#define CYLINDER_HEADER
#include "basic.h"
#include <stdexcept>
#include <utility>
#include <MiscLib/Vector.h>
#include "PointCloud.h"
#include <ostream>
#include <istream>
#include <GfxTL/HyperplaneCoordinateSystem.h>
#include <stdio.h>
#include <MiscLib/NoShrinkVector.h>
#include "LevMarLSWeight.h"
#include "LevMarFitting.h"

#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif

//! An infinite cylinder primitive, parameterized by an axis direction
//! (m_axisDir), a point on the axis (m_axisPos) and a radius (m_radius).
//! Provides construction from samples, point-to-surface queries, least-squares
//! refinement (Levenberg-Marquardt) and ray intersection.
class DLL_LINKAGE Cylinder
{
public:
	//! Thrown when construction from two oriented points fails because the
	//! normals do not determine an axis.
	struct ParallelNormalsError
	: public std::runtime_error
	{
		ParallelNormalsError();
	};
	enum { RequiredSamples = 2 };	// two oriented points suffice to define a cylinder
	Cylinder();
	Cylinder(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
	Cylinder(const Vec3f &pointA, const Vec3f &pointB,
		const Vec3f &normalA, const Vec3f &normalB);
	bool Init(const MiscLib::Vector< Vec3f > &samples);
	bool InitAverage(const MiscLib::Vector< Vec3f > &samples);
	bool Init(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
	bool Init(const Vec3f &pointA, const Vec3f &pointB,
		const Vec3f &normalA, const Vec3f &normalB);
	bool Init(bool binary, std::istream *i);	// deserialize (binary or text)
	void Init(FILE *i);
	void Init(float* array);
	inline float Distance(const Vec3f &p) const;	// unsigned distance to surface
	inline void Normal(const Vec3f &p, Vec3f *normal) const;	// outward surface normal at projection of p
	inline float DistanceAndNormal(const Vec3f &p, Vec3f *normal) const;
	inline float SignedDistance(const Vec3f &p) const;	// negative inside the cylinder
	void Project(const Vec3f &p, Vec3f *pp) const;
	// parameters are (height, angle)
	void Parameters(const Vec3f &p,
		std::pair< float, float > *param) const;
	float Radius() const;
	float &Radius();
	const Vec3f &AxisDirection() const;
	Vec3f &AxisDirection();
	const Vec3f &AxisPosition() const;
	Vec3f &AxisPosition();
	const Vec3f AngularDirection() const;
	void RotateAngularDirection(float radians);
	bool LeastSquaresFit(const PointCloud &pc,
		MiscLib::Vector< size_t >::const_iterator begin,
		MiscLib::Vector< size_t >::const_iterator end);
	template< class IteratorT >
	bool LeastSquaresFit(IteratorT begin, IteratorT end);
	bool Fit(const PointCloud &pc,
		MiscLib::Vector< size_t >::const_iterator begin,
		MiscLib::Vector< size_t >::const_iterator end)
	{ return LeastSquaresFit(pc, begin, end); }
	static bool Interpolate(const MiscLib::Vector< Cylinder > &cylinders,
		const MiscLib::Vector< float > &weights, Cylinder *ic);
	void Serialize(bool binary, std::ostream *o) const;
	static size_t SerializedSize();
	void Serialize(FILE *o) const;
	void Serialize(float* array) const;
	static size_t SerializedFloatSize();
	void Transform(float scale, const Vec3f &translate);
	void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
		const GfxTL::Vector3Df &trans);
	inline unsigned int Intersect(const Vec3f &p, const Vec3f &r,
		float *first, float *second) const;

private:
	//! Levenberg-Marquardt functor for cylinder refinement. The 7 parameters
	//! are: axis position (0..2), axis direction (3..5) and radius (6).
	//! WeightT supplies the residual weighting (e.g. plain least squares).
	template< class WeightT >
	class LevMarCylinder
	: public WeightT
	{
	public:
		enum { NumParams = 7 };
		typedef float ScalarType;

		//! Sum of squared (weighted) residuals; also fills values[] (weighted
		//! residuals) and temp[] (distance of each sample to the axis).
		template< class IteratorT >
		ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
			ScalarType *values, ScalarType *temp) const
		{
			ScalarType chi = 0;
			int size = end - begin;
#ifdef DOPARALLEL
			#pragma omp parallel for schedule(static) reduction(+:chi)
#endif
			for(int idx = 0; idx < size; ++idx)
			{
				Vec3f s;
				for(unsigned int j = 0; j < 3; ++j)
					s[j] = begin[idx][j] - params[j];
				// |(sample - pos) x dir| computed componentwise: distance to axis
				ScalarType u = params[5] * s[1] - params[4] * s[2];
				u *= u;
				ScalarType v = params[3] * s[2] - params[5] * s[0];
				u += v * v;
				v = params[4] * s[0] - params[3] * s[1];
				u += v * v;
				temp[idx] = std::sqrt(u);
				// residual = axis distance - radius, weighted by WeightT
				chi += (values[idx] = WeightT::Weigh(temp[idx] - params[6]))
					* values[idx];
			}
			return chi;
		}

		//! Jacobian of the residuals w.r.t. the 7 parameters, one row per sample.
		template< class IteratorT >
		void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
			const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
		{
			int size = end - begin;
#ifdef DOPARALLEL
			#pragma omp parallel for schedule(static)
#endif
			for(int idx = 0; idx < size; ++idx)
			{
				Vec3f s;
				for(unsigned int j = 0; j < 3; ++j)
					s[j] = begin[idx][j] - params[j];
				ScalarType g = s[0] * begin[idx][0] + s[1] * begin[idx][1]
					+ s[2] * begin[idx][2];
				if(temp[idx] < 1.0e-6)
				{
					// sample (nearly) on the axis: fall back to a finite
					// surrogate gradient to avoid division by ~0
					matrix[idx * NumParams + 0] = std::sqrt(1 - params[3] * params[3]);
					matrix[idx * NumParams + 1] = std::sqrt(1 - params[4] * params[4]);
					matrix[idx * NumParams + 2] = std::sqrt(1 - params[5] * params[5]);
				}
				else
				{
					matrix[idx * NumParams + 0] = (params[3] * g - s[0]) / temp[idx];
					matrix[idx * NumParams + 1] = (params[4] * g - s[1]) / temp[idx];
					matrix[idx * NumParams + 2] = (params[5] * g - s[2]) / temp[idx];
				}
				matrix[idx * NumParams + 3] = g * matrix[idx * NumParams + 0];
				matrix[idx * NumParams + 4] = g * matrix[idx * NumParams + 1];
				matrix[idx * NumParams + 5] = g * matrix[idx * NumParams + 2];
				matrix[idx * NumParams + 6] = -1;	// d(residual)/d(radius)
				WeightT::template DerivWeigh< NumParams >(temp[idx] - params[6],
					matrix + idx * NumParams);
			}
		}

		//! Re-normalize the axis direction and slide the axis point to the
		//! point on the axis closest to the origin (canonical representation).
		void Normalize(ScalarType *params) const
		{
			ScalarType l = std::sqrt(params[3] * params[3] + params[4] * params[4]
				+ params[5] * params[5]);
			for(unsigned int i = 3; i < 6; ++i)
				params[i] /= l;
			// find point on axis closest to origin
			float lambda = -(params[0] * params[3] + params[1] * params[4]
				+ params[2] * params[5]);
			for(unsigned int i = 0; i < 3; ++i)
				params[i] = params[i] + lambda * params[i + 3];
		}
	};

private:
	Vec3f m_axisDir;	// unit axis direction
	Vec3f m_axisPos;	// a point on the axis
	float m_radius;
	GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;	// orthonormal frame perpendicular to the axis
	float m_angularRotatedRadians;	// accumulated rotation of the angular reference direction
};

// Unsigned distance from p to the cylinder surface.
inline float Cylinder::Distance(const Vec3f &p) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);	// height of p along the axis
	float axisDist = (diff - lambda * m_axisDir).length();
	return fabs(axisDist - m_radius);
}

// Unit normal of the surface point nearest to p (radial direction).
inline void Cylinder::Normal(const Vec3f &p, Vec3f *normal) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	*normal = diff - lambda * m_axisDir;
	normal->normalize();
}

// Distance and normal in one pass; the normal is left unnormalized (zero)
// when p lies exactly on the axis (axisDist == 0).
inline float Cylinder::DistanceAndNormal(const Vec3f &p, Vec3f *normal) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	*normal = diff - lambda * m_axisDir;
	float axisDist = normal->length();
	if(axisDist > 0)
		*normal /= axisDist;
	return fabs(axisDist - m_radius);
}

// Signed distance: negative inside the cylinder, positive outside.
inline float Cylinder::SignedDistance(const Vec3f &p) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	float axisDist = (diff - lambda * m_axisDir).length();
	return axisDist - m_radius;
}

// Refine (axisPos, axisDir, radius) by Levenberg-Marquardt over the samples
// in [begin, end). Returns false if the optimization fails; on success the
// angular frame is rebuilt from the new axis.
template< class IteratorT >
bool Cylinder::LeastSquaresFit(IteratorT begin, IteratorT end)
{
	float param[7];
	for(size_t i = 0; i < 3; ++i)
		param[i] = m_axisPos[i];
	for(size_t i = 0; i < 3; ++i)
		param[i + 3] = m_axisDir[i];
	param[6] = m_radius;
	LevMarCylinder< LevMarLSWeight > levMarCylinder;
	if(!LevMar(begin, end, levMarCylinder, param))
		return false;
	for(size_t i = 0; i < 3; ++i)
		m_axisPos[i] = param[i];
	for(size_t i = 0; i < 3; ++i)
		m_axisDir[i] = param[i + 3];
	m_radius = param[6];
	m_hcs.FromNormal(m_axisDir);
	m_angularRotatedRadians = 0;
	return true;
}

// Intersect the ray/line p + t*r with the infinite cylinder.
// Returns the number of intersections (0, 1 or 2); parameters t are written
// to *first (and *second for two hits). Lines (nearly) parallel to the axis
// are reported as no intersection.
inline unsigned int Cylinder::Intersect(const Vec3f &p, const Vec3f &r,
	float *first, float *second) const
{
	using namespace std;
	// Create a coordinate system for the cylinder. In this system, the
	// cylinder segment center C is the origin and the cylinder axis direction
	// W is the z-axis. U and V are the other coordinate axis directions.
	// If P = x*U+y*V+z*W, the cylinder is x^2 + y^2 = r^2, where r is the
	// cylinder radius. The end caps are |z| = h/2, where h is the cylinder
	// height.
	float fRSqr = m_radius * m_radius;

	// convert incoming line origin to cylinder coordinates
	Vec3f kDiff = p - m_axisPos;
	Vec3f kP(kDiff.dot(m_hcs[0]), kDiff.dot(m_hcs[1]), m_axisDir.dot(kDiff));

	// Get the z-value, in cylinder coordinates, of the incoming line's
	// unit-length direction.
	float fDz = m_axisDir.dot(r);

	if(fabs(fDz) >= 1.f - 1e-7f)
		// The line is parallel to the cylinder axis.
		return 0;

	// convert incoming line unit-length direction to cylinder coordinates
	Vec3f kD(r.dot(m_hcs[0]), r.dot(m_hcs[1]), r.dot(m_axisDir));

	float fA0, fA1, fA2, fDiscr, fRoot, fInv;

	// Test intersection of line P+t*D with infinite cylinder
	// x^2+y^2 = r^2. This reduces to computing the roots of a
	// quadratic equation. If P = (px,py,pz) and D = (dx,dy,dz),
	// then the quadratic equation is
	// (dx^2+dy^2)*t^2 + 2*(px*dx+py*dy)*t + (px^2+py^2-r^2) = 0
	fA0 = kP[0]*kP[0] + kP[1]*kP[1] - fRSqr;
	fA1 = kP[0]*kD[0] + kP[1]*kD[1];
	fA2 = kD[0]*kD[0] + kD[1]*kD[1];
	fDiscr = fA1*fA1 - fA0*fA2;
	if (fDiscr < 0)
		// line does not intersect cylinder
		return 0;
	else if (fDiscr > 1e-7f)
	{
		// line intersects cylinder in two places
		fRoot = sqrt(fDiscr);
		fInv = (1.f)/fA2;
		*first = (-fA1 - fRoot)*fInv;
		*second = (-fA1 + fRoot)*fInv;
		return 2;
	}
	// line is tangent to the cylinder
	*first = -fA1/fA2;
	return 1;
}

#endif
load.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include "sptensor.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Maps a filename extension to an on-disk tensor format:
 * type 0 = ASCII coordinate text, type 1 = binary. */
struct ftype
{
  char * extension;
  int type;
};

/* NULL-terminated lookup table for get_file_type(). */
static struct ftype file_extensions[] = {
  { ".tns", 0 },
  { ".coo", 0 },
  { ".bin", 1 },
  { NULL, 0}
};

/* Returns the file-format code for fname based on its last extension;
 * falls back to 0 (ASCII coordinate form) with a warning on stderr. */
static int get_file_type(
    char const * const fname)
{
  /* find last . in filename */
  char const * const suffix = strrchr(fname, '.');
  if(suffix == NULL) {
    goto NOT_FOUND;
  }

  size_t idx = 0;
  do {
    if(strcmp(suffix, file_extensions[idx].extension) == 0) {
      return file_extensions[idx].type;
    }
  } while(file_extensions[++idx].extension != NULL);

  /* default to text coordinate format */
  NOT_FOUND:
  fprintf(stderr, "SPLATT: extension for '%s' not recognized. "
                  "Defaulting to ASCII coordinate form.\n", fname);
  return 0;
}

/* Parses an ASCII coordinate-format tensor from fp into tsr.
 * Expected layout: nmodes, then nmodes dimension sizes, then one line per
 * nonzero (nmodes indices followed by a value). Indices are shifted down by
 * start_index. Reading stops at the first malformed/short record (EOF). */
static int p_tt_read_file(ptiSparseTensor *tsr, ptiIndex start_index, FILE *fp)
{
    int iores, retval;
    ptiIndex mode;

    iores = fscanf(fp, "%u", &tsr->nmodes);
    /* NOTE(review): fscanf returns 0 (not negative) on a match failure, which
     * this `iores < 0` check would not catch, leaving nmodes uninitialized —
     * the dimension-size loop below uses `iores != 1`; confirm intent. */
    pti_CheckOSError(iores < 0, "SpTns Load");
    /* Only allocate space for sortorder, initialized to 0s. */
    tsr->sortorder = malloc(tsr->nmodes * sizeof tsr->sortorder[0]);
    pti_CheckOSError(!tsr->sortorder, "SpTns Load");
    memset(tsr->sortorder, 0, tsr->nmodes * sizeof tsr->sortorder[0]);
    tsr->ndims = malloc(tsr->nmodes * sizeof *tsr->ndims);
    pti_CheckOSError(!tsr->ndims, "SpTns Load");
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        iores = fscanf(fp, "%u", &tsr->ndims[mode]);
        pti_CheckOSError(iores != 1, "SpTns Load");
    }
    tsr->nnz = 0;
    tsr->inds = malloc(tsr->nmodes * sizeof *tsr->inds);
    pti_CheckOSError(!tsr->inds, "SpTns Load");
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        retval = ptiNewIndexVector(&tsr->inds[mode], 0, 0);
        pti_CheckError(retval, "SpTns Load", NULL);
    }
    retval = ptiNewValueVector(&tsr->values, 0, 0);
    pti_CheckError(retval, "SpTns Load", NULL);

    /* Read nonzeros until a record fails to parse (normally EOF). */
    while(retval == 0) {
        double value;
        for(mode = 0; mode < tsr->nmodes; ++mode) {
            ptiIndex index;
            iores = fscanf(fp, "%u", &index);
            if(iores != 1) {
                retval = -1;    /* short record: stop the outer loop */
                break;
            }
            if(index < start_index) {
                /* index-start_index would wrap around (unsigned) */
                pti_CheckError(PTIERR_VALUE_ERROR, "SpTns Load", "index < start_index");
            }
            ptiAppendIndexVector(&tsr->inds[mode], index-start_index);
        }
        if(retval == 0) {
            iores = fscanf(fp, "%lf", &value);
            if(iores != 1) {
                retval = -1;
                break;
            }
            ptiAppendValueVector(&tsr->values, value);
            ++tsr->nnz;
        }
    }
    /* Truncate index vectors to nnz: a trailing partial record may have
     * appended indices without a matching value. */
    for(mode = 0; mode < tsr->nmodes; ++mode) {
        tsr->inds[mode].len = tsr->nnz;
    }
    // ptiSparseTensorCollectZeros(tsr);
    return 0;
}

/* Reads the binary-format header (magic, index width, value width) and
 * aborts if the file's index width exceeds what this build supports; a
 * narrower value width only costs precision, so it is just a warning.
 * NOTE(review): fread return values are ignored throughout this file —
 * a truncated file is silently accepted. */
static void read_binary_header(
    FILE * fin,
    bin_header * header)
{
  fread(&(header->magic), sizeof(header->magic), 1, fin);
  fread(&(header->idx_width), sizeof(header->idx_width), 1, fin);
  fread(&(header->val_width), sizeof(header->val_width), 1, fin);

  if(header->idx_width > HIPARTI_INDEX_TYPEWIDTH / 8) {
    fprintf(stderr, "SPLATT: ERROR input has %lu-bit integers. "
                    "Build with HIPARTI_INDEX_TYPEWIDTH %lu\n",
                    header->idx_width * 8, header->idx_width * 8);
    exit(-1);
  }
  if(header->val_width > HIPARTI_VALUE_TYPEWIDTH / 8) {
    fprintf(stderr, "SPLATT: WARNING input has %lu-bit floating-point values. "
                    "Build with HIPARTI_VALUE_TYPEWIDTH %lu for full precision\n",
                    header->val_width * 8, header->val_width * 8);
  }
}

/* Reads `count` indices into buffer; if the file stores a narrower index
 * type (uint32_t) than ptiIndex, widens it through a bounded scratch buffer. */
static void fill_binary_idx(
    ptiIndex * const buffer,
    ptiIndex const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->idx_width == sizeof(ptiIndex)) {
    fread(buffer, sizeof(ptiIndex), count, fin);
  } else {
    /* read in uint32_t in a buffered fashion */
    ptiIndex const BUF_LEN = 1024*1024;
    uint32_t * ubuf = (uint32_t*)malloc(BUF_LEN * sizeof(*ubuf));
    for(ptiIndex n=0; n < count; n += BUF_LEN) {
      ptiIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      #pragma omp parallel for schedule(static)
      for(ptiIndex i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];   /* widening copy */
      }
    }
    free(ubuf);
  }
}

/* Same as fill_binary_idx but for the wider ptiNnzIndex destination type. */
static void fill_binary_nnzidx(
    ptiNnzIndex * const buffer,
    ptiIndex const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->idx_width == sizeof(ptiNnzIndex)) {
    fread(buffer, sizeof(ptiNnzIndex), count, fin);
  } else {
    /* read in uint32_t in a buffered fashion */
    ptiIndex const BUF_LEN = 1024*1024;
    uint32_t * ubuf = (uint32_t*)malloc(BUF_LEN * sizeof(*ubuf));
    for(ptiIndex n=0; n < count; n += BUF_LEN) {
      ptiIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      #pragma omp parallel for schedule(static)
      for(ptiIndex i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    free(ubuf);
  }
}

/* Reads `count` values into buffer, converting between float and double when
 * the file's value width differs from this build's ptiValue. */
static void fill_binary_val(
    ptiValue * const buffer,
    ptiIndex const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->val_width == sizeof(ptiValue)) {
    fread(buffer, sizeof(ptiValue), count, fin);
  } else {
    /* read in float in a buffered fashion */
    ptiIndex const BUF_LEN = 1024*1024;
    /* select whichever SPLATT *is not* configured with. */
#if HIPARTI_VALUE_TYPEWIDTH == 64
    float * ubuf = (float*)malloc(BUF_LEN * sizeof(*ubuf));
#else
    double * ubuf = (double*)malloc(BUF_LEN * sizeof(*ubuf));
#endif
    for(ptiIndex n=0; n < count; n += BUF_LEN) {
      ptiIndex const read_count = BUF_LEN < count - n ? BUF_LEN : count - n;
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      #pragma omp parallel for schedule(static)
      for(ptiIndex i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    free(ubuf);
  }
}

/**
* @brief Read a COORD tensor from a binary file, converting from smaller idx or
*        val precision if necessary.
*
* @param fin The file to read from.
*
* @return The parsed tensor.
*/
static int p_tt_read_binary_file(ptiSparseTensor *tsr, FILE * fin)
{
  int result;
  bin_header header;
  read_binary_header(fin, &header);
  // printf("header.magic: %d\n", header.magic);
  // printf("header.idx_width: %lu\n", header.idx_width);
  // printf("header.val_width: %lu\n", header.val_width);

  ptiNnzIndex nnz = 0;
  ptiIndex nmodes = 0;

  fill_binary_idx(&nmodes, 1, &header, fin);

  ptiIndex * dims = (ptiIndex *) malloc (nmodes * sizeof(*dims));
  fill_binary_idx(dims, nmodes, &header, fin);

  fill_binary_nnzidx(&nnz, 1, &header, fin);

  /* allocate structures */
  ptiNewSparseTensor(tsr, nmodes, dims);
  tsr->nnz = nnz;
  for(ptiIndex m=0; m < nmodes; ++m) {
    result = ptiResizeIndexVector(&tsr->inds[m], nnz);
    pti_CheckError(result, "SpTns Read", NULL);
  }
  result = ptiResizeValueVector(&tsr->values, nnz);
  pti_CheckError(result, "SpTns Read", NULL);

  /* fill in tensor data */
  for(ptiIndex m=0; m < nmodes; ++m) {
    fill_binary_idx(tsr->inds[m].data, nnz, &header, fin);
  }
  fill_binary_val(tsr->values.data, nnz, &header, fin);

  return 0;
}

/**
 * Load the contents of a sparse tensor from a file, dispatching on the
 * filename extension (text coordinate vs binary — see file_extensions[]).
 * @param tsr          the sparse tensor to store into
 * @param start_index  the index of the first element in the input. Set to 1
 *                     for MATLAB compatibility, else set to 0 (text format only)
 * @param fname        path of the file to read from
 */
int ptiLoadSparseTensor(ptiSparseTensor *tsr, ptiIndex start_index, char const * const fname)
{
    FILE * fp = fopen(fname, "r");
    ptiAssert(fp != NULL);
    int iores;

    switch(get_file_type(fname)) {
      case 0:
        iores = p_tt_read_file(tsr, start_index, fp);
        pti_CheckOSError(iores != 0, "SpTns Load");
        break;
      case 1:
        iores = p_tt_read_binary_file(tsr, fp);
        pti_CheckOSError(iores != 0, "SpTns Load");
        break;
    }

    fclose(fp);
    return 0;
}

/* Reads one 1-based permutation entry per line from fs into map_inds — one
 * permutation per mode, each of length ndims[mode] — converting to 0-based.
 * Asserts that exactly sum(ndims) entries were consumed. */
void ptiLoadShuffleFile(ptiSparseTensor *tsr, FILE *fs, ptiIndex ** map_inds)
{
    ptiNnzIndex line_count = 0;
    ptiNnzIndex dim_count = 0;
    int iores;
    for(ptiIndex mode = 0; mode < tsr->nmodes; ++mode) {
        dim_count += tsr->ndims[mode];
        for(ptiIndex i = 0; i < tsr->ndims[mode]; ++i) {
            iores = fscanf(fs, "%u", &(map_inds[mode][i]));
            -- map_inds[mode][i];   /* 1-based file -> 0-based indices */
            ++ line_count;
        }
    }
    ptiAssert(dim_count == line_count);

    return;
}
dense_pairwise.c
/* Copyright (c) 2016 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Functions for computing covariance, (pearson) correlation, and cosine similarity
//
// All three kernels use PAIRWISE-COMPLETE-OBSERVATIONS NA handling: for each
// column pair (j, i) only rows where BOTH entries are non-NaN contribute.
// x is an m-by-n column-major matrix; the n-by-n result is computed in the
// upper triangle (i >= j) and mirrored by symmetrize() at the end.

#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "utils/safeomp.h"

#include "coop.h"
#include "utils/fill.h"
#include "utils/inverse.h"
#include "utils/special_vals.h"


// Accumulates the pairwise-complete sums of column `vec` (a copy of column j)
// and column i of x (starting at offset mi), and counts the complete rows
// into *len. Means are NOT divided out here; callers do that.
static inline void compute_sums(const int m, const size_t mi,
  const double * const restrict vec, const double * const restrict x,
  double *restrict sumx, double *restrict sumy, int *restrict len)
{
  int k;

  *sumx = 0;
  *sumy = 0;
  *len = 0;

  PLEASE_VECTORIZE
  for (k=0; k<m; k++)
  {
    if (!isnan(vec[k]) && !isnan(x[k + mi]))
    {
      *sumx += vec[k];
      *sumy += x[k + mi];
      (*len)++;
    }
  }
}



// Cosine similarity of all column pairs of x, written into cos (n-by-n).
// A pair with zero complete observations becomes NA. If inv is true the
// result matrix is inverted (Cholesky) before symmetrization.
// Returns COOP_OK, or an error code from malloc/inversion.
int coop_cosine_mat_inplace_pairwise(const bool inv, const int m, const int n,
  const double * const restrict x, double *restrict cos)
{
  int check;
  double *vec = malloc(m * sizeof(*vec));
  CHECKMALLOC(vec);

  for (int j=0; j<n; j++)
  {
    const size_t mj = (size_t)m*j;
    memcpy(vec, x+mj, m*sizeof(*vec));  // private copy of column j for the threads
    const size_t nj = (size_t)n*j;

    #pragma omp parallel for shared(j, vec, cos) if(m*n > OMP_MIN_SIZE)
    for (int i=j; i<n; i++)  // upper triangle only; mirrored below
    {
      const size_t mi = (size_t)m*i;
      double xx, xy, yy;
      xx = xy = yy = 0.0;
      int len = 0;

      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
        {
          const double xval = vec[k];
          const double yval = x[k + mi];

          xx += xval * xval;
          yy += yval * yval;
          xy += xval * yval;
          len++;
        }
      }

      if (len == 0)
      {
        set_na_real(cos + (i + nj));
        continue;
      }

      cos[i + nj] = xy / sqrt(xx * yy);
    }
  }

  free(vec);

  if (inv)
  {
    check = inv_sym_chol(n, cos);
    CHECKRET(check);
  }

  symmetrize(n, cos);

  return COOP_OK;
}



// Pearson correlation of all column pairs of x, written into cor (n-by-n).
// A pair with fewer than 2 complete observations becomes NA (both mirror
// positions are set, though symmetrize() would cover it anyway).
// NOTE(review): a constant column yields sd == 0 and hence +/-inf or NaN by
// division — presumably intentional (matches R's cor() warning behavior);
// confirm against callers.
int coop_pcor_mat_inplace_pairwise(const bool inv, const int m, const int n,
  const double * const restrict x, double *restrict cor)
{
  int check;
  double *vec = malloc(m * sizeof(*vec));
  CHECKMALLOC(vec);

  for (int j=0; j<n; j++)
  {
    const size_t mj = (size_t)m*j;
    memcpy(vec, x+mj, m*sizeof(*vec));
    const size_t nj = (size_t)n*j;

    #pragma omp parallel for shared(j, vec, cor) if(m*n > OMP_MIN_SIZE)
    for (int i=j; i<n; i++)
    {
      const size_t mi = (size_t)m*i;
      int len;
      double meanx, meany;
      compute_sums(m, mi, vec, x, &meanx, &meany, &len);

      if (len == 0 || len == 1)  // correlation undefined below 2 observations
      {
        set_na_real(cor + (i + nj));
        set_na_real(cor + (j + (size_t)n*i));
        continue;
      }

      const double dlen = (double) len;
      meanx /= dlen;
      meany /= dlen;

      // sample standard deviations over the complete observations
      double sdx = 0.;
      double sdy = 0.;
      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
        {
          sdx += (vec[k] - meanx)*(vec[k] - meanx);
          sdy += (x[k + mi] - meany)*(x[k + mi] - meany);
        }
      }

      sdx = sqrt(sdx/(dlen-1.));
      sdy = sqrt(sdy/(dlen-1.));

      // cross-products of the mean-centered columns
      double mmcp = 0.0;
      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
          mmcp += (vec[k] - meanx) * (x[k + mi] - meany);
      }

      cor[i + nj] = mmcp / sdx / sdy / (dlen - 1.0);;  /* (stray extra ';' kept as-is) */
    }
  }

  free(vec);

  if (inv)
  {
    check = inv_sym_chol(n, cor);
    CHECKRET(check);
  }

  symmetrize(n, cor);

  return COOP_OK;
}



// Covariance of all column pairs of x, written into cov (n-by-n).
// A pair with zero complete observations becomes NA.
// NOTE(review): unlike the pcor kernel, len == 1 is NOT rejected here, so
// 1/(len-1) divides by zero (mmcp is 0, giving NaN) — confirm intent.
int coop_covar_mat_inplace_pairwise(const bool inv, const int m, const int n,
  const double * const restrict x, double *restrict cov)
{
  int check;
  double *vec = malloc(m * sizeof(*vec));
  CHECKMALLOC(vec);

  for (int j=0; j<n; j++)
  {
    const size_t mj = (size_t)m*j;
    memcpy(vec, x+mj, m*sizeof(*vec));
    const size_t nj = (size_t)n*j;

    #pragma omp parallel for shared(j, vec, cov) if(m*n > OMP_MIN_SIZE)
    for (int i=j; i<n; i++)
    {
      const size_t mi = (size_t)m*i;
      int len;
      double meanx, meany;
      compute_sums(m, mi, vec, x, &meanx, &meany, &len);

      if (len == 0)
      {
        set_na_real(cov + (i + nj));
        set_na_real(cov + (j + (size_t)n*i));
        continue;
      }

      meanx /= (double) len;
      meany /= (double) len;

      double mmcp = 0.0;
      SAFE_SIMD
      for (int k=0; k<m; k++)
      {
        if (!isnan(vec[k]) && !isnan(x[k + mi]))
          mmcp += (vec[k] - meanx) * (x[k + mi] - meany);
      }

      cov[i + nj] = mmcp * ((double) 1.0/(len-1));
    }
  }

  free(vec);

  if (inv)
  {
    check = inv_sym_chol(n, cov);
    CHECKRET(check);
  }

  symmetrize(n, cov);

  return COOP_OK;
}
DRB071-targetparallelfor-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Use of omp target: the scalar `len` is deliberately left out of the map
 * clause, so per the OpenMP default mapping rules it becomes firstprivate
 * inside the target region. There is no data race in this kernel. */
int main(int argc, char* argv[])
{
  int len = 1000;
  int a[len];

  /* Seed each slot with its own index. */
  for (int idx = 0; idx < len; idx++)
    a[idx] = idx;

  /* Offload the array and bump every element by one; each iteration
   * touches a distinct element, so the parallel loop is race-free. */
#pragma omp target map(a[0:len])
#pragma omp parallel for
  for (int idx = 0; idx < len; idx++)
    a[idx] = a[idx] + 1;

  return 0;
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/

/*
  Typedef declarations.
*/

/* One color sample carried in double precision: RGB plus alpha. */
typedef struct _DoublePixelPacket
{
  double red, green, blue, alpha;
} DoublePixelPacket;

/*
  A node of the color-description tree.  child[] has 16 slots so the tree can
  branch on alpha as a fourth axis (2^4 children) when alpha is associated;
  only the first 8 are used otherwise.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo *parent, *child[16];  /* tree links */
  MagickSizeType number_unique;          /* n2: pixels classified exactly at this node */
  DoublePixelPacket total_color;         /* component sums (Sr,Sg,Sb,Sa) for mean color */
  double quantize_error;                 /* E: accumulated quantization error */
  size_t color_number, id, level;        /* colormap index, child id, tree depth */
} NodeInfo;

/* A slab of NodeInfo structures; slabs are linked so they can all be freed. */
typedef struct _Nodes
{
  NodeInfo *nodes;
  struct _Nodes *next;
} Nodes;

/* Working state shared by the classification/reduction/assignment phases. */
typedef struct _CubeInfo
{
  NodeInfo *root;                          /* tree root: the whole RGB(A) domain */
  size_t colors, maximum_colors;           /* current vs. requested color count */
  ssize_t transparent_index;               /* colormap slot of the transparent entry */
  MagickSizeType transparent_pixels;
  DoublePixelPacket target;                /* color being matched by ClosestColor() */
  double distance, pruning_threshold, next_threshold;
  size_t nodes, free_nodes, color_number;
  NodeInfo *next_node;
  Nodes *node_queue;                       /* allocated node slabs (for teardown) */
  MemoryInfo *memory_info;
  ssize_t *cache;                          /* color cache used while dithering */
  DoublePixelPacket error[ErrorQueueLength]; /* error-diffusion history */
  double weights[ErrorQueueLength];
  QuantizeInfo *quantize_info;
  MagickBooleanType associate_alpha;       /* treat alpha as a 4th tree axis? */
  ssize_t x, y;                            /* current pixel position (dithering) */
  size_t depth;                            /* maximum tree depth in use */
  MagickOffsetType offset;
  MagickSizeType span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *,ExceptionInfo *),
  SetGrayscaleImage(Image *,ExceptionInfo *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e   Q u a n t i z e   I n f o                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo *quantize_info;

  /* Allocate and fill with library defaults. */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char *option;

      /* Map the boolean dither flag to a method: Riemersma when enabled. */
      quantize_info->dither_method=image_info->dither == MagickFalse ?
        NoDitherMethod : RiemersmaDitherMethod;
      /* An explicit "dither" image option overrides the boolean flag. */
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
% % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) 
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register Quantum *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
*/ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) { double intensity; /* Monochrome image. 
*/ intensity=0.0; if ((image->colors > 1) && (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (IssRGBCompatibleColorspace(colorspace) == MagickFalse)) (void) TransformImageColorspace(image,colorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. 
At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; double bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. 
*/ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. 
*/ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color 
tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != MagickFalse) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. 
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo *clone_info;

  /* Start from defaults so every field is well defined before copying. */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  /* Copy only the caller-visible quantization settings. */
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o s e s t C o l o r                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color
%  (cube_info->target); the best distance and colormap index found so far are
%  kept in cube_info->distance and cube_info->color_number.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t i;

  size_t number_children;

  /*
    Traverse any children (16-way when alpha is an axis, 8-way otherwise).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double pixel;

      register double alpha, beta, distance;

      register DoublePixelPacket *magick_restrict q;

      register PixelInfo *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* Alpha-weight each side when alpha is associated (premultiply). */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      /*
        Squared distance built channel by channel; each nested test bails
        out early once the running sum exceeds the best distance so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      /* New best match: record it. */
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Only meaningful when every pixel already resolves to the colormap. */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Requantize at full tree depth with the current color count; duplicate
    and unused colormap entries collapse as a side effect.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t i;

  size_t number_children;

  /*
    Traverse any children (16-way when alpha is an axis, 8-way otherwise).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double alpha;

      register PixelInfo *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        component sums divided by the number of pixels (alpha = 1/n2).
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double opacity;

          /* Mean alpha first; color then needs un-premultiplying. */
          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double gamma;

              /* gamma = 1/alpha: divide out the associated alpha. */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* Track the most-populated partially-transparent entry. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: nodes are bump-allocated from a linked
    list of arenas, so free each arena's NodeInfo array and then the arena.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /* memory_info backs the dither color cache; NULL when dithering is off */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y   Q u a n t i z e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invert the signature so a use-after-free trips the assert above. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);  /* the relinquished pointer (conventionally NULL) */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Release the per-thread error-diffusion scanline buffers.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  /*
    NOTE(review): the thread count is re-read here; assumes the ThreadResource
    limit has not changed since AcquirePixelThreadSet() — confirm.
  */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate, per worker thread, a buffer of 2*count DoublePixelPackets: one
  pair of scanlines (current and previous row errors).
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));  /* frees what was allocated */
  }
  return(pixels);
}

/*
  Map a pixel's color to its slot in the dither color cache: the top
  (8-CacheShift) bits of each channel are packed into a single index.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);  /* private copy of the tree-search state */
    current=pixels[id]+(y & 0x01)*image->columns;   /* this row's errors */
    previous=pixels[id]+((y+1) & 0x01)*image->columns;  /* prior row's */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);  /* serpentine direction */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      /* Odd rows are scanned right-to-left (serpentine). */
      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Add in the Floyd-Steinberg weighted errors: 7/16 from the previous
        pixel on this row, 1/16, 5/16 and 3/16 from the prior row.
      */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)  /* cache miss: search the color tree */
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);  /* larger than any possible distance */
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /* NOTE(review): 'status' is discarded here; consider return(status). */
  return(MagickTrue);
}

static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
    ExceptionInfo *);

/*
  Recursively trace a Hilbert curve of order 'level', dithering one pixel
  per move through RiemersmaDither().
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void)
RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recurse: each case is the standard Hilbert-curve production for its
      orientation — four sub-curves joined by three dither moves.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}

/*
  Dither the pixel at the cube cursor (p->x, p->y) — if it lies inside the
  image — then move the cursor one step in 'direction'.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      /* Fold in the exponentially-weighted error queue. */
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)  /* cache miss: search the color tree */
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);  /* larger than any possible distance */
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the cursor; an unknown direction (e.g. ForgetGravity) is a no-op. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  /* Floyd-Steinberg handles every non-Riemersma dither method. */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
*/
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Choose depth so the 2^depth Hilbert grid covers the longer image edge.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Final call dithers the pixel under the cursor without moving it
     (ForgetGravity matches no case in RiemersmaDither's switch). */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   C u b e   I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
    NOTE(review): the early NULL returns below leak cube_info and its partial
    allocations — consider freeing before returning.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;  /* root is its own parent */
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);  /* no dithering: skip cache and weights */
  /*
    Initialize dither resources: one cache slot per packed channel nibble.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: all bytes 0xFF makes every ssize_t slot -1.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;  /* absorb rounding error so weights sum to 1 */
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   N o d e   I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* Push the new arena on the front of the queue list. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;  /* bump-allocate from the arena */
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   Q u a n t i z e   E r r o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /* Error is only defined against a colormapped (PseudoClass) image. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /* Alpha-weight both sides of the comparison. */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   Q u a n t i z e   I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  /* Defaults: 256 colors, Riemersma dithering, no error measurement. */
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P o s t e r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
*/
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->storage_class == PseudoClass)
    /*
      NOTE(review): the pragma shares 'progress' before it is initialized
      below; this loop does not touch it, but confirm intent.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image: snap each updatable channel to the nearest level.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Quantize to at most levels^3 colors so the posterized palette is exact.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   C h i l d                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;  /* unlink from parent */
  cube_info->nodes--;  /* storage itself is arena-owned; freed at destroy */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   L e v e l                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children; only nodes exactly at the cube's depth (the
    bottom level) are pruned into their parents.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  P r u n e T o C u b e D e p t h                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children; unlike PruneLevel(), every node strictly deeper
    than the cube depth is merged into its parent.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } if ((quantize_info->dither_method == NoDitherMethod) && (image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace, exception); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. 
*/ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. 
*/ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. 
On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Build the cube from the REFERENCE image, then assign its colors. */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders colormap entries by intensity.
  NOTE(review): the difference is truncated to int, so entries whose
  intensities differ by less than 1.0 compare equal -- the comparator is
  symmetric (valid for qsort) but the resulting order among near-equal
  intensities is coarse; confirm this is intentional.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  /* Clamp before the int cast to avoid overflow of the conversion. */
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* Map from quantum intensity (via ScaleQuantumToMap) to colormap slot. */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /* memset with -1 yields all-bits-set, i.e. -1 in each ssize_t slot. */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          /*
            NOTE(review): double-checked pattern -- the first read is done
            outside the critical section and re-checked inside it; the
            unsynchronized first read is benign only if stale values merely
            cause an extra trip into the critical section.  Confirm.
          */
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity (alpha temporarily stores each entry's
    original index), then collapse duplicate entries and build the
    old-index -> new-index table in colormap_index.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,
    sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Rewrite every pixel's index through the compaction table.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
omp-loop03.c
/* Regression test for the OpenMP `lastprivate' clause on an orphaned
   worksharing loop: after the loop, the shared variable must hold the value
   assigned by the sequentially-last iteration (i == 1023) in every thread. */

extern void abort (void);

/* Shared global written by the worksharing loop below.  */
int a;

void
foo ()
{
  int i;
  a = 30;
  /* Ensure every thread has executed the store above before any thread's
     worksharing loop starts overwriting `a'.  */
  #pragma omp barrier
  /* `lastprivate (a)': the thread that executes the last iteration copies
     its private `a' back to the shared one at the loop's implicit barrier.  */
  #pragma omp for lastprivate (a)
  for (i = 0; i < 1024; i++)
    {
      a = i;
    }
  if (a != 1023)
    abort ();
}

int
main (void)
{
  /* foo() is called from inside the parallel region, so its orphaned
     barrier/for constructs bind to this 64-thread team.  */
  #pragma omp parallel num_threads (64)
    foo ();
  return 0;
}
ch_common.c
#define MAIN #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <assert.h> #include "ch_common.h" #include "cholesky.h" static void get_block_rank(int *block_rank, int nt); void omp_potrf(double * const A, int ts, int ld) { static int INFO; static const char L = 'L'; dpotrf_(&L, &ts, A, &ld, &INFO); } void omp_trsm(double *A, double *B, int ts, int ld) { static char LO = 'L', TR = 'T', NU = 'N', RI = 'R'; static double DONE = 1.0; dtrsm_(&RI, &LO, &TR, &NU, &ts, &ts, &DONE, A, &ld, B, &ld ); } void omp_gemm(double *A, double *B, double *C, int ts, int ld) { static const char TR = 'T', NT = 'N'; static double DONE = 1.0, DMONE = -1.0; dgemm_(&NT, &TR, &ts, &ts, &ts, &DMONE, A, &ld, B, &ld, &DONE, C, &ld); } void omp_syrk(double *A, double *B, int ts, int ld) { static char LO = 'L', NT = 'N'; static double DONE = 1.0, DMONE = -1.0; dsyrk_(&LO, &NT, &ts, &ts, &DMONE, A, &ld, &DONE, B, &ld ); } void cholesky_single(const int ts, const int nt, double* A[nt][nt]) { for (int k = 0; k < nt; k++) { #pragma omp task depend(out: A[k][k]) { omp_potrf(A[k][k], ts, ts); #ifdef DEBUG if (mype == 0) printf("potrf:out:A[%d][%d]\n", k, k); #endif } for (int i = k + 1; i < nt; i++) { #pragma omp task depend(in: A[k][k]) depend(out: A[k][i]) { omp_trsm(A[k][k], A[k][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("trsm :in:A[%d][%d]:out:A[%d][%d]\n", k, k, k, i); #endif } } for (int i = k + 1; i < nt; i++) { for (int j = k + 1; j < i; j++) { #pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i]) { omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("gemm :in:A[%d][%d]:A[%d][%d]:out:A[%d][%d]\n", k, i, k, j, j, i); #endif } } #pragma omp task depend(in: A[k][i]) depend(out: A[i][i]) { omp_syrk(A[k][i], A[i][i], ts, ts); #ifdef DEBUG if (mype == 0) printf("syrk :in:A[%d][%d]:out:A[%d][%d]\n", k, i, i, i); #endif } } } #pragma omp taskwait } inline void wait(MPI_Request *comm_req) { int 
comm_comp = 0; MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE); while (!comm_comp) { #pragma omp taskyield MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE); } // MPI_Wait(comm_req, MPI_STATUS_IGNORE); } inline void reset_send_flags(char *send_flags) { for (int i = 0; i < np; i++) send_flags[i] = 0; } int main(int argc, char *argv[]) { /* MPI Initialize */ int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); if (provided != MPI_THREAD_MULTIPLE) { printf("This Compiler does not support MPI_THREAD_MULTIPLE\n"); exit(0); } MPI_Comm_rank(MPI_COMM_WORLD, &mype); MPI_Comm_size(MPI_COMM_WORLD, &np); /* cholesky init */ const char *result[3] = {"n/a","successful","UNSUCCESSFUL"}; const double eps = BLAS_dfpinfo(blas_eps); if (argc < 4) { printf("cholesky matrix_size block_size check\n"); exit(-1); } const int n = atoi(argv[1]); // matrix size const int ts = atoi(argv[2]); // tile size int check = atoi(argv[3]); // check result? const int nt = n / ts; if (mype == 0) printf("nt = %d, ts = %d\n", nt, ts); /* Set block rank */ int *block_rank = malloc(nt * nt * sizeof(int)); get_block_rank(block_rank, nt); #ifdef DEBUG if (mype == 0) { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { printf("%d ", block_rank[i * nt + j]); } printf("\n"); } } #endif double *A[nt][nt], *B, *C[nt], *Ans[nt][nt]; #pragma omp parallel { #pragma omp single { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { #pragma omp task depend(out: A[i][j]) shared(Ans, A) { if (check) { MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &Ans[i][j]); initialize_tile(ts, Ans[i][j]); } if (block_rank[i*nt+j] == mype) { MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &A[i][j]); if (!check) { initialize_tile(ts, A[i][j]); } else { for (int k = 0; k < ts * ts; k++) { A[i][j][k] = Ans[i][j][k]; } } } } } #pragma omp task depend(inout: A[i][i]) shared(Ans, A) { // add to diagonal if (check) { Ans[i][i][ts/2*ts+ts/2] = (double)nt; } if (block_rank[i*nt+i] == 
mype) { A[i][i][ts/2*ts+ts/2] = (double)nt; } } } } // omp single } // omp parallel MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &B); for (int i = 0; i < nt; i++) { MPI_Alloc_mem(ts * ts * sizeof(double), MPI_INFO_NULL, &C[i]); } #pragma omp single num_threads = omp_get_max_threads(); const float t3 = get_time(); if (check) cholesky_single(ts, nt, (double* (*)[nt]) Ans); const float t4 = get_time() - t3; MPI_Barrier(MPI_COMM_WORLD); if (mype == 0) printf("Starting parallel computation\n"); const float t1 = get_time(); cholesky_mpi(ts, nt, (double* (*)[nt])A, B, C, block_rank); const float t2 = get_time() - t1; if (mype == 0) printf("Finished parallel computation\n"); MPI_Barrier(MPI_COMM_WORLD); /* Verification */ if (check) { for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { if (block_rank[i * nt + j] == mype) { for (int k = 0; k < ts*ts; k++) { if (Ans[i][j][k] != A[i][j][k]) check = 2; } } } } } float time_mpi = t2; float gflops_mpi = (((1.0 / 3.0) * n * n * n) / ((time_mpi) * 1.0e+9)); float time_ser = t4; float gflops_ser = (((1.0 / 3.0) * n * n * n) / ((time_ser) * 1.0e+9)); printf("test:%s-%d-%d-%d:mype:%2d:np:%2d:threads:%2d:result:%s:gflops:%f:time:%f:gflops_ser:%f:time_ser:%f\n", argv[0], n, ts, num_threads, mype, np, num_threads, result[check], gflops_mpi, t2, gflops_ser, t4); for (int i = 0; i < nt; i++) { for (int j = 0; j < nt; j++) { if (block_rank[i*nt+j] == mype) { free(A[i][j]); } if (check) free(Ans[i][j]); } free(C[i]); } free(B); free(block_rank); MPI_Finalize(); return 0; } static void get_block_rank(int *block_rank, int nt) { int row, col; row = col = np; if (np != 1) { while (1) { row = row / 2; if (row * col == np) break; col = col / 2; if (row * col == np) break; } } if (mype == 0) printf("row = %d, col = %d\n", row, col); int i, j, tmp_rank = 0, offset = 0; for (i = 0; i < nt; i++) { for (j = 0; j < nt; j++) { block_rank[i*nt + j] = tmp_rank + offset; tmp_rank++; if (tmp_rank >= col) tmp_rank = 0; } tmp_rank = 0; 
offset = (offset + col >= np) ? 0 : offset + col; } }
GB_unop__log2_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__log2_fc64_fc64)
// op(A') function: GB (_unop_tran__log2_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_clog2 (aij)

// type of the A matrix entries
#define GB_ATYPE \
    GxB_FC64_t

// type of the C matrix entries
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator: complex base-2 logarithm
#define GB_OP(z, x) \
    z = GB_clog2 (x) ;

// casting (here A and C have the same type, so this is a plain copy)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij): fused get / cast / apply for one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_clog2 (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG2 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_clog2 (aij) element-wise over anz entries, in parallel.
// Returns GrB_NO_VALUE when the operator/type pair is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB (_unop_apply__log2_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog2 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap flag is clear (no entry present)
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__log2_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
average.c
#include<stdio.h> #include<omp.h> #define MAX 5 int main() { double ave=0.0, A[MAX]; int i; for (i=0; i<MAX; i++) { A[i] = i+1.0; } #pragma omp parallel for for (i=0; i<MAX; i++) { ave += A[i]; } ave /= MAX; printf("%f\n",ave); return 0; }
GB_unaryop__one_int16_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_int16_int16 // op(A') function: GB_tran__one_int16_int16 // C type: int16_t // A type: int16_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_int16_int16 ( int16_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_int16_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
box_coder_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 }; inline BoxCodeType GetBoxCodeType(const std::string& type) { if (type == "encode_center_size") { return BoxCodeType::kEncodeCenterSize; } else if (type == "decode_center_size") { return BoxCodeType::kDecodeCenterSize; } PADDLE_THROW("Not support type %s.", type); } template <typename DeviceContext, typename T> class BoxCoderKernel : public framework::OpKernel<T> { public: void EncodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, const bool normalized, const std::vector<float> variance, T* output) const { int64_t row = target_box->dims()[0]; int64_t col = prior_box->dims()[0]; int64_t len = prior_box->dims()[1]; auto* target_box_data = target_box->data<T>(); auto* prior_box_data = prior_box->data<T>(); const T* prior_box_var_data = nullptr; if (prior_box_var) prior_box_var_data = prior_box_var->data<T>(); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { T prior_box_width = prior_box_data[j * len + 2] - prior_box_data[j * len] + (normalized == false); T 
prior_box_height = prior_box_data[j * len + 3] - prior_box_data[j * len + 1] + (normalized == false); T prior_box_center_x = prior_box_data[j * len] + prior_box_width / 2; T prior_box_center_y = prior_box_data[j * len + 1] + prior_box_height / 2; T target_box_center_x = (target_box_data[i * len + 2] + target_box_data[i * len]) / 2; T target_box_center_y = (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2; T target_box_width = target_box_data[i * len + 2] - target_box_data[i * len] + (normalized == false); T target_box_height = target_box_data[i * len + 3] - target_box_data[i * len + 1] + (normalized == false); size_t offset = i * col * len + j * len; output[offset] = (target_box_center_x - prior_box_center_x) / prior_box_width; output[offset + 1] = (target_box_center_y - prior_box_center_y) / prior_box_height; output[offset + 2] = std::log(std::fabs(target_box_width / prior_box_width)); output[offset + 3] = std::log(std::fabs(target_box_height / prior_box_height)); if (prior_box_var) { int prior_var_offset = 0; if (prior_box_var->dims().size() == 2) { prior_var_offset = j * len; } output[offset] /= prior_box_var_data[prior_var_offset]; output[offset + 1] /= prior_box_var_data[prior_var_offset + 1]; output[offset + 2] /= prior_box_var_data[prior_var_offset + 2]; output[offset + 3] /= prior_box_var_data[prior_var_offset + 3]; } else if (!(variance.empty())) { for (int k = 0; k < 4; ++k) { output[offset + k] /= static_cast<T>(variance[k]); } } } } } void DecodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, const bool normalized, const int axis, const std::vector<float> variance, T* output) const { int64_t row = target_box->dims()[0]; int64_t col = target_box->dims()[1]; int64_t len = target_box->dims()[2]; auto* target_box_data = target_box->data<T>(); auto* prior_box_data = prior_box->data<T>(); const T* prior_box_var_data = nullptr; if (prior_box_var) prior_box_var_data = 
prior_box_var->data<T>(); int prior_box_offset = 0; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { size_t offset = i * col * len + j * len; if (axis == 0) { prior_box_offset = j * len; } else if (axis == 1) { prior_box_offset = i * len; } T prior_box_width = prior_box_data[prior_box_offset + 2] - prior_box_data[prior_box_offset] + (normalized == false); T prior_box_height = prior_box_data[prior_box_offset + 3] - prior_box_data[prior_box_offset + 1] + (normalized == false); T prior_box_center_x = prior_box_data[prior_box_offset] + prior_box_width / 2; T prior_box_center_y = prior_box_data[prior_box_offset + 1] + prior_box_height / 2; T target_box_center_x = 0, target_box_center_y = 0; T target_box_width = 0, target_box_height = 0; T box_var_x = T(1), box_var_y = T(1); T box_var_w = T(1), box_var_h = T(1); if (prior_box_var) { int prior_var_offset = 0; if (prior_box_var->dims().size() == 2) { if (axis == 0) prior_var_offset = j * len; else if (axis == 1) prior_var_offset = i * len; } box_var_x = prior_box_var_data[prior_var_offset]; box_var_y = prior_box_var_data[prior_var_offset + 1]; box_var_w = prior_box_var_data[prior_var_offset + 2]; box_var_h = prior_box_var_data[prior_var_offset + 3]; } else if (!(variance.empty())) { box_var_x = static_cast<T>(variance[0]); box_var_y = static_cast<T>(variance[1]); box_var_w = static_cast<T>(variance[2]); box_var_h = static_cast<T>(variance[3]); } target_box_center_x = box_var_x * target_box_data[offset] * prior_box_width + prior_box_center_x; target_box_center_y = box_var_y * target_box_data[offset + 1] * prior_box_height + prior_box_center_y; target_box_width = std::exp(box_var_w * target_box_data[offset + 2]) * prior_box_width; target_box_height = std::exp(box_var_h * target_box_data[offset + 3]) * prior_box_height; output[offset] = target_box_center_x - target_box_width / 2; output[offset + 1] = target_box_center_y - 
target_box_height / 2; output[offset + 2] = target_box_center_x + target_box_width / 2 - (normalized == false); output[offset + 3] = target_box_center_y + target_box_height / 2 - (normalized == false); } } } void Compute(const framework::ExecutionContext& context) const override { auto* prior_box = context.Input<framework::Tensor>("PriorBox"); auto* prior_box_var = context.Input<framework::Tensor>("PriorBoxVar"); auto* target_box = context.Input<framework::LoDTensor>("TargetBox"); auto* output_box = context.Output<framework::Tensor>("OutputBox"); std::vector<float> variance = context.Attr<std::vector<float>>("variance"); const int axis = context.Attr<int>("axis"); if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL, "Only support 1 level of LoD."); } if (prior_box_var) { PADDLE_ENFORCE(variance.empty(), "Input 'PriorBoxVar' and attribute 'variance' should not" "be used at the same time."); } if (!(variance.empty())) { PADDLE_ENFORCE(static_cast<int>(variance.size()) == 4, "Size of attribute 'variance' should be 4"); } auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type")); bool normalized = context.Attr<bool>("box_normalized"); auto row = target_box->dims()[0]; auto col = prior_box->dims()[0]; if (code_type == BoxCodeType::kDecodeCenterSize) { col = target_box->dims()[1]; } auto len = prior_box->dims()[1]; output_box->mutable_data<T>({row, col, len}, context.GetPlace()); T* output = output_box->data<T>(); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, variance, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { DecodeCenterSize(target_box, prior_box, prior_box_var, normalized, axis, variance, output); } } }; } // namespace operators } // namespace paddle
c_md.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License (LICENSE file) along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA FILE: c_md.c VERSION: 1.0 DATE: May 2004 AUTHOR: Bill Magro, Kuck and Associates, Inc. (KAI), 1998 COMMENTS TO: sande@csi.ull.es DESCRIPTION: This program implements a simple molecular dynamics simulation, using the velocity Verlet time integration scheme. The particles interact with a central pair potential. COMMENTS: REFERENCES: W. C. Swope and H. C. Andersen and P. H. Berens and K. R. Wilson A Computer Simulation Method for the Calculation of Equilibrium Constants for the Formation of Physical Clusters of Molecules: Application to Small Water Clusters Journal of Chemical Physics, 1982 vol. 
76 pg 637-649 BASIC PRAGMAS: parallel for USAGE: ./c_md.par 8192 10 INPUT: Number of particles Number of simulation steps OUTPUT: - FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ //#include "OmpSCR.h" #include <math.h> #include <omp.h> // following added by sfsiegel due to use of "calloc": #include <stdlib.h> // following added by sfsiegel due to use of "printf": #include <stdio.h> #ifndef RAND_MAX #define RAND_MAX 0x7fff #endif #ifndef M_PI_2 #define M_PI_2 1.57079632679489661923 /* pi/2 */ #endif #define NUM_ARGS 2 #define NUM_TIMERS 1 #define DEFAULT_NPARTS 8192 #define DEFAULT_NSTEPS 10 #define USAGE_STR "NPARTS NSTEPS" #define NDIM 3 #define NPARTSINIT 10 #define NSTEPSINIT 4 int NPARTS; /* No. of particles */ int NSTEPS; /* No. of simulation steps */ typedef double vnd_t[NDIM]; /* ----------------------------------------------------------------------- PROTOTYPES * ----------------------------------------------------------------------- */ double v(double x); double dv(double x); void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel, vnd_t *acc); double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr); double dot_prod(int n, vnd_t x,vnd_t y); void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f, double *pot_p, double *kin_p); void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a, double mass, double dt); int main (int argc, char **argv); /* ----------------------------------------------------------------------- IMPLEMENTATION * ----------------------------------------------------------------------- */ /* ----------------------------------------------------------------------- statement function for the pair potential. This potential is a harmonic well which smoothly saturates to a maximum value at PI/2. 
* ----------------------------------------------------------------------- */ double v(double x) { if (x < M_PI_2) return pow(sin(x), 2.0); else return 1.0; } /* ----------------------------------------------------------------------- statement function for the derivative of the pair potential * ----------------------------------------------------------------------- */ double dv(double x) { if (x < M_PI_2) return 2.0 * sin(x) * cos(x); else return 0.0; } /* ----------------------------------------------------------------------- Initialize the positions, velocities, and accelerations. * ----------------------------------------------------------------------- */ void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel, vnd_t *acc) { int i, j; double x; //srand(4711L); int r = 42; // REPLACE RANDOM NUMBER GENERATION for (i = 0; i < np; i++) { for (j = 0; j < nd; j++) { x = (r++) % 10000 / (double)10000.0; pos[i][j] = box[j] * x; vel[i][j] = 0.0; acc[i][j] = 0.0; } } } /* ----------------------------------------------------------------------- Compute the displacement vector (and its norm) between two particles. 
* ----------------------------------------------------------------------- */ double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr) { int i; double d; d = 0.0; for (i = 0; i < nd; i++) { dr[i] = r1[i] - r2[i]; d += dr[i] * dr[i]; } return sqrt(d); } /* ----------------------------------------------------------------------- Return the dot product between two vectors of type double and length n * ----------------------------------------------------------------------- */ double dot_prod(int n, vnd_t x, vnd_t y) { int i; double t = 0.0; for (i = 0; i < n; i++) { t += x[i] * y[i]; } return t; } /* ----------------------------------------------------------------------- Compute the forces and energies, given positions, masses, and velocities * ----------------------------------------------------------------------- */ void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f, double *pot_p, double *kin_p) { int i, j, k; vnd_t rij; double d; double pot, kin; pot = 0.0; kin = 0.0; /* The computation of forces and energies is fully parallel. 
*/ #pragma omp parallel for default(shared) private(i, j, k, rij, d) reduction(+ : pot, kin) for (i = 0; i < np; i++) { /* compute potential energy and forces */ for (j = 0; j < nd; j++) f[i][j] = 0.0; for (j = 0; j < np; j++) { if (i != j) { d = dist(nd, pos[i], pos[j], rij); /* attribute half of the potential energy to particle 'j' */ pot = pot + 0.5 * v(d); for (k = 0; k < nd; k++) { f[i][k] = f[i][k] - rij[k]* dv(d) /d; } } } /* compute kinetic energy */ kin = kin + dot_prod(nd, vel[i], vel[j]); } kin = kin * 0.5 * mass; *pot_p = pot; *kin_p = kin; } /* ----------------------------------------------------------------------- Perform the time integration, using a velocity Verlet algorithm * ----------------------------------------------------------------------- */ void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a, double mass, double dt) { int i, j; double rmass; rmass = 1.0/mass; /* The time integration is fully parallel */ #pragma omp parallel for default(shared) private(i, j) firstprivate(rmass, dt) for (i = 0; i < np; i++) { for (j = 0; j < nd; j++) { pos[i][j] = pos[i][j] + vel[i][j]*dt + 0.5*dt*dt*a[i][j]; vel[i][j] = vel[i][j] + 0.5*dt*(f[i][j]*rmass + a[i][j]); a[i][j] = f[i][j]*rmass; } } } /* ----------------------------------------------------------------------- */ int main (int argc, char **argv) { /* simulation parameters */ double mass = 1.0; double dt = 1.0e-4; vnd_t box; vnd_t *position; vnd_t *velocity; vnd_t *force; vnd_t *accel; double potential, kinetic, E0; int i; int NUMTHREADS; double total_time; char *PARAM_NAMES[NUM_ARGS] = {"Nparts", "Nsteps"}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" }; char *DEFAULT_VALUES[NUM_ARGS] = {"8192", "10"}; NUMTHREADS = 1; //omp_get_num_threads(); //OSCR_init (NUMTHREADS, "Molecular dynamic simulation", "Use md <Nparts> <Nsteps>", NUM_ARGS, // PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, //argc, argv); NPARTS = NPARTSINIT; //OSCR_getarg_int(1); NSTEPS = 
NSTEPSINIT; //OSCR_getarg_int(2); /* Default: DEFAULT_NPARTS, DEFAULT_NSTEPS */ /* Memory allocation */ position = calloc(NPARTS, sizeof(vnd_t)); velocity = calloc(NPARTS, sizeof(vnd_t)); force = calloc(NPARTS, sizeof(vnd_t)); accel = calloc(NPARTS, sizeof(vnd_t)); NUMTHREADS = 1; //omp_get_num_threads(); for (i = 0; i < NDIM; i++) box[i] = 10.0; /* set initial positions, velocities, and accelerations */ initialize(NPARTS, NDIM, box, position, velocity, accel); //OSCR_timer_start(0); /* compute the forces and energies */ compute(NPARTS, NDIM, position, velocity, mass, force, &potential, &kinetic); E0 = potential + kinetic; /* This is the main time stepping loop */ for (i = 0; i < NSTEPS; i++) { compute(NPARTS, NDIM, position, velocity, mass, force, &potential, &kinetic); #if 0 printf("%17.9e %17.9e %17.9e\n", potential, kinetic, (potential + kinetic - E0) / E0); #endif update(NPARTS, NDIM, position, velocity, force, accel, mass, dt); } //OSCR_timer_stop(0); total_time = 1; //OSCR_timer_read(0); //OSCR_report(1, TIMERS_NAMES); printf("\n \t# THREADS \tTIME (secs.) \n"); printf("\t %d \t\t%14.6lf\n", NUMTHREADS, total_time); return 0; } /* * vim:ts=2:sw=2: */
parallel_measurement.c
#include<stdio.h> #include<math.h> #include<omp.h> #include<time.h> #include<string.h> #include<stdlib.h> int p; // Using the MONOTONIC clock #define CLK CLOCK_MONOTONIC struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; typedef struct { unsigned char gs; } PPMPixelGS; typedef struct { int x, y; PPMPixelGS *data; } PPMImageGS; #define RGB_COMPONENT_COLOR 255 static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data 
img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",255); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } PPMImage* changeImage(PPMImage* im) { int rows = im->x; int cols = im->y; int x, y; PPMImage *im2 = (PPMImage*) malloc(sizeof(PPMImage)); // Output image im2->x = rows; // Set number of rows im2->y = cols; // Set number of columns im2->data = (PPMPixel *) malloc(rows * cols * sizeof(PPMPixel)); // Allocate memory for (rows * cols) pixels /* Generate output image 1) Find mean 2) Find standard deviation 3) In the output image, write each pixel as ABS((original_pixel - mean) / standard_deviation) */ double red_mean = 0.0, red_std_dev = 0.0; double grn_mean = 0.0, grn_std_dev = 0.0; double blu_mean = 0.0, blu_std_dev = 0.0; omp_set_num_threads(p); #pragma omp parallel private(x, y) { double temp_red_mean = 0.0; double temp_grn_mean = 0.0; double temp_blu_mean = 0.0; #pragma omp for for(x = 0; x < rows; x++) { for(y = 0; y < cols; y++) { int idx = (x * cols) + y; temp_red_mean += ((double) (im->data + idx)->red); temp_grn_mean += ((double) (im->data + idx)->green); temp_blu_mean += ((double) (im->data + idx)->blue); } } #pragma omp critical { red_mean += temp_red_mean; grn_mean += temp_grn_mean; blu_mean += temp_blu_mean; } } red_mean /= (rows * cols); grn_mean /= (rows * cols); blu_mean /= (rows * cols); #pragma omp parallel 
private(x, y) { double private_red_std_dev = 0.0; double private_grn_std_dev = 0.0; double private_blu_std_dev = 0.0; #pragma omp for for(x = 0; x < rows; x++) { for(y = 0; y < cols; y++) { int idx = (x * cols) + y; private_red_std_dev += ((((double) (im->data + idx)->red) - red_mean) * (((double) (im->data + idx)->red) - red_mean)); private_grn_std_dev += ((((double) (im->data + idx)->green) - grn_mean) * (((double) (im->data + idx)->green) - grn_mean)); private_blu_std_dev += ((((double) (im->data + idx)->blue) - blu_mean) * (((double) (im->data + idx)->blue) - blu_mean)); } } #pragma omp critical { red_std_dev += private_red_std_dev; grn_std_dev += private_grn_std_dev; blu_std_dev += private_blu_std_dev; } } red_std_dev /= (rows * cols); grn_std_dev /= (rows * cols); blu_std_dev /= (rows * cols); red_std_dev = sqrt(red_std_dev); grn_std_dev = sqrt(grn_std_dev); blu_std_dev = sqrt(blu_std_dev); red_std_dev = 1.0; grn_std_dev = 1.0; blu_std_dev = 1.0; // printf("Red:%lf %lf Grn: %lf %lf Blu: %lf %lf\n", red_mean, red_std_dev, grn_mean, grn_std_dev, blu_mean, blu_std_dev); #pragma omp parallel private(x, y) { #pragma omp for for(x = 0; x < rows; x++) { for(y = 0; y < cols; y++) { int idx = (x * cols) + y; double red_old = (double) ((im->data + idx)->red); double grn_old = (double) ((im->data + idx)->green); double blu_old = (double) ((im->data + idx)->blue); unsigned char red_new = (unsigned char) abs((red_old - red_mean) / red_std_dev); unsigned char grn_new = (unsigned char) abs((grn_old - grn_mean) / grn_std_dev); unsigned char blu_new = (unsigned char) abs((blu_old - blu_mean) / blu_std_dev); (im2->data + idx)->red = red_new; (im2->data + idx)->green = grn_new; (im2->data + idx)->blue = blu_new; // printf("Old:%d %d %d New:%d %d %d\n", (int) red_old, (int) grn_old, (int)blu_old, red_new, grn_new, blu_new); } } } return im2; } int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; /* Should start before anything else 
*/ clock_gettime(CLK, &start_e2e); /* Check if enough command-line arguments are taken in. */ if(argc < 3){ printf( "Usage: %s n p \n", argv[0] ); return -1; } int n=atoi(argv[1]); /* size of input array */ p=atoi(argv[2]); /* number of processors*/ char *problem_name = "image_warping"; char *approach_name = "collapsed_directive"; FILE* outputFile; char* c=argv[1]; char* str="../../Lenna"; char* str2=malloc(15); strcpy(str2,str); strcat(str2,c); char* str3=".ppm"; strcat(str2,str3); char* filename=str2; PPMImage *im; im = readPPM(filename); char outputFileName[50]; sprintf(outputFileName,"output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]); clock_gettime(CLK, &start_alg); /* Start the algo timer */ /*----------------------Core algorithm starts here----------------------------------------------*/ double start_time = omp_get_wtime(); PPMImage* im2 = changeImage(im); double end_time = omp_get_wtime(); /*----------------------Core algorithm finished--------------------------------------------------*/ clock_gettime(CLK, &end_alg); /* End the algo timer */ /* Ensure that only the algorithm is present between these two timers. Further, the whole algorithm should be present. */ char outputImageName[1024]; outputImageName[0] = '\0'; strcat(outputImageName, "../../Lenna_"); strcat(outputImageName, argv[1]); strcat(outputImageName,"_normalization_parallel.ppm"); writePPM(outputImageName,im2); /* Should end before anything else (printing comes later) */ clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, end_alg); printf("%s,%s,%d,%d,%d,%ld,%d,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); return 0; }
libgomp.h
/* Copyright (C) 2005-2017 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU Offloading and Multi Processing Library (libgomp). Libgomp is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* This file contains data types and function declarations that are not part of the official OpenACC or OpenMP user interfaces. There are declarations in here that are part of the GNU Offloading and Multi Processing ABI, in that the compiler is required to know about them and use them. The convention is that the all caps prefix "GOMP" is used group items that are part of the external ABI, and the lower case prefix "gomp" is used group items that are completely private to the library. */ #ifndef LIBGOMP_H #define LIBGOMP_H 1 #ifndef _LIBGOMP_CHECKING_ /* Define to 1 to perform internal sanity checks. */ #define _LIBGOMP_CHECKING_ 0 #endif #include "config.h" #include "gstdint.h" #include "libgomp-plugin.h" #ifdef HAVE_PTHREAD_H #include <pthread.h> #endif #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <stdarg.h> /* Needed for memset in priority_queue.c. 
*/ #if _LIBGOMP_CHECKING_ # ifdef STRING_WITH_STRINGS # include <string.h> # include <strings.h> # else # ifdef HAVE_STRING_H # include <string.h> # else # ifdef HAVE_STRINGS_H # include <strings.h> # endif # endif # endif #endif #ifdef HAVE_ATTRIBUTE_VISIBILITY # pragma GCC visibility push(hidden) #endif /* If we were a C++ library, we'd get this from <std/atomic>. */ enum memmodel { MEMMODEL_RELAXED = 0, MEMMODEL_CONSUME = 1, MEMMODEL_ACQUIRE = 2, MEMMODEL_RELEASE = 3, MEMMODEL_ACQ_REL = 4, MEMMODEL_SEQ_CST = 5 }; /* alloc.c */ extern void *gomp_malloc (size_t) __attribute__((malloc)); extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); extern void *gomp_realloc (void *, size_t); /* Avoid conflicting prototypes of alloca() in system headers by using GCC's builtin alloca(). */ #define gomp_alloca(x) __builtin_alloca(x) /* error.c */ extern void gomp_vdebug (int, const char *, va_list); extern void gomp_debug (int, const char *, ...) __attribute__ ((format (printf, 2, 3))); #define gomp_vdebug(KIND, FMT, VALIST) \ do { \ if (__builtin_expect (gomp_debug_var, 0)) \ (gomp_vdebug) ((KIND), (FMT), (VALIST)); \ } while (0) #define gomp_debug(KIND, ...) \ do { \ if (__builtin_expect (gomp_debug_var, 0)) \ (gomp_debug) ((KIND), __VA_ARGS__); \ } while (0) extern void gomp_verror (const char *, va_list); extern void gomp_error (const char *, ...) __attribute__ ((format (printf, 1, 2))); extern void gomp_vfatal (const char *, va_list) __attribute__ ((noreturn)); extern void gomp_fatal (const char *, ...) __attribute__ ((noreturn, format (printf, 1, 2))); struct gomp_task; struct gomp_taskgroup; struct htab; #include "priority_queue.h" #include "sem.h" #include "mutex.h" #include "bar.h" #include "simple-bar.h" #include "ptrlock.h" /* This structure contains the data to control one work-sharing construct, either a LOOP (FOR/DO) or a SECTIONS. 
*/ enum gomp_schedule_type { GFS_RUNTIME, GFS_STATIC, GFS_DYNAMIC, GFS_GUIDED, GFS_AUTO, GFS_HETPROBE, GFS_HIERARCHY_STATIC, GFS_HIERARCHY_DYNAMIC, }; struct gomp_doacross_work_share { union { /* chunk_size copy, as ws->chunk_size is multiplied by incr for GFS_DYNAMIC. */ long chunk_size; /* Likewise, but for ull implementation. */ unsigned long long chunk_size_ull; /* For schedule(static,0) this is the number of iterations assigned to the last thread, i.e. number of iterations / number of threads. */ long q; /* Likewise, but for ull implementation. */ unsigned long long q_ull; }; /* Size of each array entry (padded to cache line size). */ unsigned long elt_sz; /* Number of dimensions in sink vectors. */ unsigned int ncounts; /* True if the iterations can be flattened. */ bool flattened; /* Actual array (of elt_sz sized units), aligned to cache line size. This is indexed by team_id for GFS_STATIC and outermost iteration / chunk_size for other schedules. */ unsigned char *array; /* These two are only used for schedule(static,0). */ /* This one is number of iterations % number of threads. */ long t; union { /* And this one is cached t * (q + 1). */ long boundary; /* Likewise, but for the ull implementation. */ unsigned long long boundary_ull; }; /* Array of shift counts for each dimension if they can be flattened. */ unsigned int shift_counts[]; }; struct gomp_work_share { /* This member records the SCHEDULE clause to be used for this construct. The user specification of "runtime" will already have been resolved. If this is a SECTIONS construct, this value will always be DYNAMIC. */ enum gomp_schedule_type sched; int mode; union { struct { /* This is the chunk_size argument to the SCHEDULE clause. */ long chunk_size; /* This is the iteration end point. If this is a SECTIONS construct, this is the number of contained sections. */ long end; /* This is the iteration step. If this is a SECTIONS construct, this is always 1. 
*/ long incr; }; struct { /* The same as above, but for the unsigned long long loop variants. */ unsigned long long chunk_size_ull; unsigned long long end_ull; unsigned long long incr_ull; }; }; union { /* This is a circular queue that details which threads will be allowed into the ordered region and in which order. When a thread allocates iterations on which it is going to work, it also registers itself at the end of the array. When a thread reaches the ordered region, it checks to see if it is the one at the head of the queue. If not, it blocks on its RELEASE semaphore. */ unsigned *ordered_team_ids; /* This is a pointer to DOACROSS work share data. */ struct gomp_doacross_work_share *doacross; }; /* This is the number of threads that have registered themselves in the circular queue ordered_team_ids. */ unsigned ordered_num_used; /* This is the team_id of the currently acknowledged owner of the ordered section, or -1u if the ordered section has not been acknowledged by any thread. This is distinguished from the thread that is *allowed* to take the section next. */ unsigned ordered_owner; /* This is the index into the circular queue ordered_team_ids of the current thread that's allowed into the ordered reason. */ unsigned ordered_cur; /* This is a chain of allocated gomp_work_share blocks, valid only in the first gomp_work_share struct in the block. */ struct gomp_work_share *next_alloc; /* The above fields are written once during workshare initialization, or related to ordered worksharing. Make sure the following fields are in a different cache line. */ /* This lock protects the update of the following members. */ gomp_mutex_t lock __attribute__((aligned (64))); /* This is the count of the number of threads that have exited the work share construct. If the construct was marked nowait, they have moved on to other work; otherwise they're blocked on a barrier. The last member of the team to exit the work share construct must deallocate it. 
*/ unsigned threads_completed; union { /* This is the next iteration value to be allocated. In the case of GFS_STATIC loops, this the iteration start point and never changes. */ long next; /* The same, but with unsigned long long type. */ unsigned long long next_ull; /* This is the returned data structure for SINGLE COPYPRIVATE. */ void *copyprivate; }; union { /* Link to gomp_work_share struct for next work sharing construct encountered after this one. */ gomp_ptrlock_t next_ws; /* gomp_work_share structs are chained in the free work share cache through this. */ struct gomp_work_share *next_free; }; /* If only few threads are in the team, ordered_team_ids can point to this array which fills the padding at the end of this struct. */ unsigned inline_ordered_team_ids[0]; }; /* This structure contains all of the thread-local data associated with a thread team. This is the data that must be saved when a thread encounters a nested PARALLEL construct. */ struct gomp_team_state { /* This is the team of which the thread is currently a member. */ struct gomp_team *team; /* This is the work share construct which this thread is currently processing. Recall that with NOWAIT, not all threads may be processing the same construct. */ struct gomp_work_share *work_share; /* This is the previous work share construct or NULL if there wasn't any. When all threads are done with the current work sharing construct, the previous one can be freed. The current one can't, as its next_ws field is used. */ struct gomp_work_share *last_work_share; /* This is the ID of this thread within the team. This value is guaranteed to be between 0 and N-1, where N is the number of threads in the team. */ unsigned team_id; /* Nesting level. */ unsigned level; /* Active nesting level. Only active parallel regions are counted. */ unsigned active_level; /* Place-partition-var, offset and length into gomp_places_list array. 
*/ unsigned place_partition_off; unsigned place_partition_len; #ifdef HAVE_SYNC_BUILTINS /* Number of single stmts encountered. */ unsigned long single_count; #endif /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the trip number through the loop. So first time a particular loop is encountered this number is 0, the second time through the loop is 1, etc. This is unused when the compiler knows in advance that the loop is statically scheduled. */ unsigned long static_trip; }; struct target_mem_desc; /* These are the OpenMP 4.0 Internal Control Variables described in section 2.3.1. Those described as having one copy per task are stored within the structure; those described as having one copy for the whole program are (naturally) global variables. */ struct gomp_task_icv { unsigned long nthreads_var; enum gomp_schedule_type run_sched_var; int run_sched_chunk_size; int default_device_var; unsigned int thread_limit_var; bool dyn_var; bool nest_var; char bind_var; /* Internal ICV. */ struct target_mem_desc *target_data; }; extern struct gomp_task_icv gomp_global_icv; #ifndef HAVE_SYNC_BUILTINS extern gomp_mutex_t gomp_managed_threads_lock; extern gomp_mutex_t popcorn_tid_lock; #endif extern unsigned long gomp_max_active_levels_var; extern bool gomp_cancel_var; extern int gomp_max_task_priority_var; extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; extern unsigned long gomp_available_cpus, gomp_managed_threads; extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; extern char *gomp_bind_var_list; extern unsigned long gomp_bind_var_list_len; extern void **gomp_places_list; extern unsigned long gomp_places_list_len; extern unsigned int gomp_num_teams_var; extern int gomp_debug_var; extern int goacc_device_num; extern char *goacc_device_type; /* Popcorn profiling machinery. */ extern bool popcorn_profiling; extern const char *popcorn_prof_fn; extern FILE *popcorn_prof_fp; enum gomp_task_kind { /* Implicit task. 
*/ GOMP_TASK_IMPLICIT, /* Undeferred task. */ GOMP_TASK_UNDEFERRED, /* Task created by GOMP_task and waiting to be run. */ GOMP_TASK_WAITING, /* Task currently executing or scheduled and about to execute. */ GOMP_TASK_TIED, /* Used for target tasks that have vars mapped and async run started, but not yet completed. Once that completes, they will be readded into the queues as GOMP_TASK_WAITING in order to perform the var unmapping. */ GOMP_TASK_ASYNC_RUNNING }; struct gomp_task_depend_entry { /* Address of dependency. */ void *addr; struct gomp_task_depend_entry *next; struct gomp_task_depend_entry *prev; /* Task that provides the dependency in ADDR. */ struct gomp_task *task; /* Depend entry is of type "IN". */ bool is_in; bool redundant; bool redundant_out; }; struct gomp_dependers_vec { size_t n_elem; size_t allocated; struct gomp_task *elem[]; }; /* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */ struct gomp_taskwait { bool in_taskwait; bool in_depend_wait; /* Number of tasks we are waiting for. */ size_t n_depend; gomp_sem_t taskwait_sem; }; /* This structure describes a "task" to be run by a thread. */ struct gomp_task { /* Parent of this task. */ struct gomp_task *parent; /* Children of this task. */ struct priority_queue children_queue; /* Taskgroup this task belongs in. */ struct gomp_taskgroup *taskgroup; /* Tasks that depend on this task. */ struct gomp_dependers_vec *dependers; struct htab *depend_hash; struct gomp_taskwait *taskwait; /* Number of items in DEPEND. */ size_t depend_count; /* Number of tasks this task depends on. Once this counter reaches 0, we have no unsatisfied dependencies, and this task can be put into the various queues to be scheduled. */ size_t num_dependees; /* Priority of this task. */ int priority; /* The priority node for this task in each of the different queues. We put this here to avoid allocating space for each priority node. 
Then we play offsetof() games to convert between pnode[] entries and the gomp_task in which they reside. */ struct priority_node pnode[3]; struct gomp_task_icv icv; void (*fn) (void *); void *fn_data; enum gomp_task_kind kind; bool in_tied_task; bool final_task; bool copy_ctors_done; /* Set for undeferred tasks with unsatisfied dependencies which block further execution of their parent until the dependencies are satisfied. */ bool parent_depends_on; /* Dependencies provided and/or needed for this task. DEPEND_COUNT is the number of items available. */ struct gomp_task_depend_entry depend[]; }; /* This structure describes a single #pragma omp taskgroup. */ struct gomp_taskgroup { struct gomp_taskgroup *prev; /* Queue of tasks that belong in this taskgroup. */ struct priority_queue taskgroup_queue; bool in_taskgroup_wait; bool cancelled; gomp_sem_t taskgroup_sem; size_t num_children; }; /* Various state of OpenMP async offloading tasks. */ enum gomp_target_task_state { GOMP_TARGET_TASK_DATA, GOMP_TARGET_TASK_BEFORE_MAP, GOMP_TARGET_TASK_FALLBACK, GOMP_TARGET_TASK_READY_TO_RUN, GOMP_TARGET_TASK_RUNNING, GOMP_TARGET_TASK_FINISHED }; /* This structure describes a target task. */ struct gomp_target_task { struct gomp_device_descr *devicep; void (*fn) (void *); size_t mapnum; size_t *sizes; unsigned short *kinds; unsigned int flags; enum gomp_target_task_state state; struct target_mem_desc *tgt; struct gomp_task *task; struct gomp_team *team; /* Device-specific target arguments. */ void **args; void *hostaddrs[]; }; /* This structure describes a "team" of threads. These are the threads that are spawned by a PARALLEL constructs, as well as the work sharing constructs that the team encounters. */ struct gomp_team { /* This is the number of threads in the current team. */ unsigned nthreads; /* This is number of gomp_work_share structs that have been allocated as a block last time. 
*/ unsigned work_share_chunk; /* This is the saved team state that applied to a master thread before the current thread was created. */ struct gomp_team_state prev_ts; /* This semaphore should be used by the master thread instead of its "native" semaphore in the thread structure. Required for nested parallels, as the master is a member of two teams. */ gomp_sem_t master_release; /* This points to an array with pointers to the release semaphore of the threads in the team. */ gomp_sem_t **ordered_release; /* List of work shares on which gomp_fini_work_share hasn't been called yet. If the team hasn't been cancelled, this should be equal to each thr->ts.work_share, but otherwise it can be a possibly long list of workshares. */ struct gomp_work_share *work_shares_to_free; /* List of gomp_work_share structs chained through next_free fields. This is populated and taken off only by the first thread in the team encountering a new work sharing construct, in a critical section. */ struct gomp_work_share *work_share_list_alloc; /* List of gomp_work_share structs freed by free_work_share. New entries are atomically added to the start of the list, and alloc_work_share can safely only move all but the first entry to work_share_list alloc, as free_work_share can happen concurrently with alloc_work_share. */ struct gomp_work_share *work_share_list_free; #ifdef HAVE_SYNC_BUILTINS /* Number of simple single regions encountered by threads in this team. */ unsigned long single_count; #else /* Mutex protecting addition of workshares to work_share_list_free. */ gomp_mutex_t work_share_list_free_lock; #endif /* This barrier is used for most synchronization of the team. */ gomp_barrier_t barrier; /* Initial work shares, to avoid allocating any gomp_work_share structs in the common case. */ struct gomp_work_share work_shares[8]; gomp_mutex_t task_lock; /* Scheduled tasks. */ struct priority_queue task_queue; /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. 
*/ unsigned int task_count; /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */ unsigned int task_queued_count; /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running directly in gomp_barrier_handle_tasks; tasks spawned from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when that is called from a task run from gomp_barrier_handle_tasks. task_running_count should be always <= team->nthreads, and if current task isn't in_tied_task, then it will be even < team->nthreads. */ unsigned int task_running_count; int work_share_cancelled; int team_cancelled; /* This array contains structures for implicit tasks. */ struct gomp_task implicit_task[]; }; /* This structure contains all data that is private to libgomp and is allocated per thread. */ struct gomp_thread { /* This is the function that the thread should run upon launch. */ void (*fn) (void *data); void *data; /* This is the current team state for this thread. The ts.team member is NULL only if the thread is idle. */ struct gomp_team_state ts; /* This is the task that the thread is currently executing. */ struct gomp_task *task; /* This semaphore is used for ordered loops. */ gomp_sem_t release; /* Place this thread is bound to plus one, or zero if not bound to any place. */ unsigned int place; /* User pthread thread pool */ struct gomp_thread_pool *thread_pool; /* Popcorn's TID, basically this thread's number out of the total number of threads created by the runtime over the lifetime of the application. */ int popcorn_created_tid; /* Node ID on which this thread is executing in Popcorn. */ int popcorn_nid; /* Reduction method for variables currently being reduced. */ int reduction_method; /* Time stamp for this thread's probe start. */ struct timespec probe_start; }; struct gomp_thread_pool { /* This array manages threads spawned from the top level, which will return to the idle loop once the current PARALLEL construct ends. 
*/
  /* Array of pointers to the threads owned by this pool, and the
     allocated vs. in-use sizes of that array.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  /* The last team is used for non-nested teams to delay their
     destruction to make sure all the threads in the team move on
     to the pool's barrier before the team's barrier is destroyed.  */
  struct gomp_team *last_team;
  /* Number of threads running in this contention group.  */
  unsigned long threads_busy;

  /* This barrier holds and releases threads waiting in thread pools.  */
  gomp_simple_barrier_t threads_dock;
};

/* Construct kinds passed to GOMP_cancel / GOMP_cancellation_point.
   LOOP/FOR/DO share one value; the values are distinct bits.  */
enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};

/* ... and here is that TLS data.  */

/* gomp_thread returns the calling thread's struct gomp_thread.  Three
   implementations: on nvptx the per-thread structs live in a shared
   array indexed by the hardware %tid.y register; with native TLS the
   struct is a __thread variable; otherwise it is looked up through a
   pthread key.  */
#if defined __nvptx__
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif

extern struct gomp_task_icv *gomp_new_icv (void);

/* Here's how to access the current copy of the ICVs.  Returns the
   current task's ICV block when one exists; otherwise, if WRITE is
   true, obtains a fresh block via gomp_new_icv (presumably a
   task-private copy -- see task.c), and if WRITE is false returns the
   read-only global ICVs.  */
static inline struct gomp_task_icv *gomp_icv (bool write)
{
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  else if (write)
    return gomp_new_icv ();
  else
    return &gomp_global_icv;
}

#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

extern pthread_key_t gomp_thread_destructor;
#endif

/* Function prototypes.
*/ /* affinity.c */ extern void gomp_init_affinity (void); #ifdef LIBGOMP_USE_PTHREADS extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int); #endif extern void **gomp_affinity_alloc (unsigned long, bool); extern void gomp_affinity_init_place (void *); extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long, long, bool); extern bool gomp_affinity_remove_cpu (void *, unsigned long); extern bool gomp_affinity_copy_place (void *, void *, long); extern bool gomp_affinity_same_place (void *, void *); extern bool gomp_affinity_finalize_place_list (bool); extern bool gomp_affinity_init_level (int, unsigned long, bool); extern void gomp_affinity_print_place (void *); extern void gomp_get_place_proc_ids_8 (int, int64_t *); extern bool popcorn_affinity_init_nodes (unsigned long *, unsigned long, bool); extern bool popcorn_affinity_init_nodes_uniform (unsigned long, bool); extern bool popcorn_affinity_init_node_ratings (unsigned long *, unsigned long, bool); /* iter.c */ extern bool gomp_iter_is_last (long); extern bool gomp_iter_is_last_ull (unsigned long long); extern int gomp_iter_static_next (long *, long *); extern bool gomp_iter_dynamic_next_locked (long *, long *); extern bool gomp_iter_dynamic_next_locked_ws (long *, long *, struct gomp_work_share *); extern bool gomp_iter_dynamic_next_locked_raw (long *, long *, struct gomp_work_share *, long); extern bool gomp_iter_guided_next_locked (long *, long *); #ifdef HAVE_SYNC_BUILTINS extern bool gomp_iter_dynamic_next (long *, long *); extern bool gomp_iter_dynamic_next_ws (long *, long *, struct gomp_work_share *); extern bool gomp_iter_dynamic_next_raw (long *, long *, struct gomp_work_share *, long); extern bool gomp_iter_guided_next (long *, long *); #endif /* iter_ull.c */ extern int gomp_iter_ull_static_next (unsigned long long *, unsigned long long *); extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *, unsigned long long *); extern bool 
gomp_iter_ull_dynamic_next_locked_ws (unsigned long long *, unsigned long long *, struct gomp_work_share *); extern bool gomp_iter_ull_dynamic_next_locked_raw (unsigned long long *, unsigned long long *, struct gomp_work_share *, unsigned long long); extern bool gomp_iter_ull_guided_next_locked (unsigned long long *, unsigned long long *); #if defined HAVE_SYNC_BUILTINS && defined __LP64__ extern bool gomp_iter_ull_dynamic_next (unsigned long long *, unsigned long long *); extern bool gomp_iter_ull_dynamic_next_ws (unsigned long long *, unsigned long long *, struct gomp_work_share *); extern bool gomp_iter_ull_dynamic_next_raw (unsigned long long *, unsigned long long *, struct gomp_work_share *, unsigned long long); extern bool gomp_iter_ull_guided_next (unsigned long long *, unsigned long long *); #endif /* ordered.c */ extern void gomp_ordered_first (void); extern void gomp_ordered_last (void); extern void gomp_ordered_next (void); extern void gomp_ordered_static_init (void); extern void gomp_ordered_static_next (void); extern void gomp_ordered_sync (void); extern void gomp_doacross_init (unsigned, long *, long); extern void gomp_doacross_ull_init (unsigned, unsigned long long *, unsigned long long); /* parallel.c */ extern unsigned gomp_resolve_num_threads (unsigned, unsigned); /* proc.c (in config/) */ extern int gomp_parse_cpuinfo(void); extern void gomp_init_num_threads (void); extern unsigned gomp_dynamic_max_threads (void); /* task.c */ extern void gomp_init_task (struct gomp_task *, struct gomp_task *, struct gomp_task_icv *); extern void gomp_end_task (void); extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); extern void gomp_task_maybe_wait_for_dependencies (void **); extern bool gomp_create_target_task (struct gomp_device_descr *, void (*) (void *), size_t, void **, size_t *, unsigned short *, unsigned int, void **, void **, enum gomp_target_task_state); static void inline gomp_finish_task (struct gomp_task *task) { if (__builtin_expect 
(task->depend_hash != NULL, 0)) free (task->depend_hash); } /* team.c */ extern struct gomp_team *gomp_new_team (unsigned); extern void gomp_team_start (void (*) (void *), void *, unsigned, unsigned, struct gomp_team *); extern void gomp_team_end (void); extern void gomp_free_thread (void *); /* target.c */ extern void gomp_init_targets_once (void); extern int gomp_get_num_devices (void); extern bool gomp_target_task_fn (void *); /* Splay tree definitions. */ typedef struct splay_tree_node_s *splay_tree_node; typedef struct splay_tree_s *splay_tree; typedef struct splay_tree_key_s *splay_tree_key; struct target_var_desc { /* Splay key. */ splay_tree_key key; /* True if data should be copied from device to host at the end. */ bool copy_from; /* True if data always should be copied from device to host at the end. */ bool always_copy_from; /* Relative offset against key host_start. */ uintptr_t offset; /* Actual length. */ uintptr_t length; }; struct target_mem_desc { /* Reference count. */ uintptr_t refcount; /* All the splay nodes allocated together. */ splay_tree_node array; /* Start of the target region. */ uintptr_t tgt_start; /* End of the targer region. */ uintptr_t tgt_end; /* Handle to free. */ void *to_free; /* Previous target_mem_desc. */ struct target_mem_desc *prev; /* Number of items in following list. */ size_t list_count; /* Corresponding target device descriptor. */ struct gomp_device_descr *device_descr; /* List of target items to remove (or decrease refcount) at the end of region. */ struct target_var_desc list[]; }; /* Special value for refcount - infinity. */ #define REFCOUNT_INFINITY (~(uintptr_t) 0) /* Special value for refcount - tgt_offset contains target address of the artificial pointer to "omp declare target link" object. */ #define REFCOUNT_LINK (~(uintptr_t) 1) struct splay_tree_key_s { /* Address of the host object. */ uintptr_t host_start; /* Address immediately after the host object. 
*/ uintptr_t host_end; /* Descriptor of the target memory. */ struct target_mem_desc *tgt; /* Offset from tgt->tgt_start to the start of the target object. */ uintptr_t tgt_offset; /* Reference count. */ uintptr_t refcount; /* Pointer to the original mapping of "omp declare target link" object. */ splay_tree_key link_key; }; /* The comparison function. */ static inline int splay_compare (splay_tree_key x, splay_tree_key y) { if (x->host_start == x->host_end && y->host_start == y->host_end) return 0; if (x->host_end <= y->host_start) return -1; if (x->host_start >= y->host_end) return 1; return 0; } #include "splay-tree.h" typedef struct acc_dispatch_t { /* This is a linked list of data mapped using the acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas. Unlike mapped_data in the goacc_thread struct, unmapping can happen out-of-order with respect to mapping. */ /* This is guarded by the lock in the "outer" struct gomp_device_descr. */ struct target_mem_desc *data_environ; /* Execute. */ __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func; /* Async cleanup callback registration. */ __typeof (GOMP_OFFLOAD_openacc_register_async_cleanup) *register_async_cleanup_func; /* Asynchronous routines. */ __typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func; __typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func; __typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func; __typeof (GOMP_OFFLOAD_openacc_async_wait_all_async) *async_wait_all_async_func; __typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func; /* Create/destroy TLS data. */ __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func; __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data) *destroy_thread_data_func; /* NVIDIA target specific routines. 
*/ struct { __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device) *get_current_device_func; __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context) *get_current_context_func; __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func; __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func; } cuda; } acc_dispatch_t; /* Various state of the accelerator device. */ enum gomp_device_state { GOMP_DEVICE_UNINITIALIZED, GOMP_DEVICE_INITIALIZED, GOMP_DEVICE_FINALIZED }; /* This structure describes accelerator device. It contains name of the corresponding libgomp plugin, function handlers for interaction with the device, ID-number of the device, and information about mapped memory. */ struct gomp_device_descr { /* Immutable data, which is only set during initialization, and which is not guarded by the lock. */ /* The name of the device. */ const char *name; /* Capabilities of device (supports OpenACC, OpenMP). */ unsigned int capabilities; /* This is the ID number of device among devices of the same type. */ int target_id; /* This is the TYPE of device. */ enum offload_target_type type; /* Function handlers. 
*/ __typeof (GOMP_OFFLOAD_get_name) *get_name_func; __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func; __typeof (GOMP_OFFLOAD_get_type) *get_type_func; __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func; __typeof (GOMP_OFFLOAD_init_device) *init_device_func; __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func; __typeof (GOMP_OFFLOAD_version) *version_func; __typeof (GOMP_OFFLOAD_load_image) *load_image_func; __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func; __typeof (GOMP_OFFLOAD_alloc) *alloc_func; __typeof (GOMP_OFFLOAD_free) *free_func; __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func; __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func; __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func; __typeof (GOMP_OFFLOAD_can_run) *can_run_func; __typeof (GOMP_OFFLOAD_run) *run_func; __typeof (GOMP_OFFLOAD_async_run) *async_run_func; /* Splay tree containing information about mapped memory regions. */ struct splay_tree_s mem_map; /* Mutex for the mutable data. */ gomp_mutex_t lock; /* Current state of the device. OpenACC allows to move from INITIALIZED state back to UNINITIALIZED state. OpenMP allows only to move from INITIALIZED to FINALIZED state (at program shutdown). */ enum gomp_device_state state; /* OpenACC-specific data and functions. */ /* This is mutable because of its mutable data_environ and target_data members. */ acc_dispatch_t openacc; }; /* Kind of the pragma, for which gomp_map_vars () is called. 
*/
enum gomp_map_vars_kind
{
  GOMP_MAP_VARS_OPENACC,
  GOMP_MAP_VARS_TARGET,
  GOMP_MAP_VARS_DATA,
  GOMP_MAP_VARS_ENTER_DATA
};

extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, bool, int, int);

extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
					      size_t, void **, void **,
					      size_t *, void *, bool,
					      enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
extern void gomp_unload_device (struct gomp_device_descr *);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);

/* Publish the calling thread's newly initialized work share by storing
   it into the previous work share's next_ws ptrlock, so other threads
   chaining through that lock can reach it.  Skipped when there is no
   previous work share (first construct encountered by this thread).  */
static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *thr = gomp_thread ();
  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
    gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.
*/ #include "omp-lock.h" #define _LIBGOMP_OMP_LOCK_DEFINED 1 #include "omp.h.in" #if !defined (HAVE_ATTRIBUTE_VISIBILITY) \ || !defined (HAVE_ATTRIBUTE_ALIAS) \ || !defined (HAVE_AS_SYMVER_DIRECTIVE) \ || !defined (PIC) \ || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT) # undef LIBGOMP_GNU_SYMBOL_VERSIONING #endif #ifdef LIBGOMP_GNU_SYMBOL_VERSIONING extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW; extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW; extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW; extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW; extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW; extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; # define strong_alias(fn, al) \ extern __typeof (fn) al __attribute__ ((alias (#fn))); # define omp_lock_symver(fn) \ __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \ __asm (".symver g" #fn "_25, " #fn "@OMP_1.0"); #else # define gomp_init_lock_30 omp_init_lock # define 
gomp_destroy_lock_30 omp_destroy_lock # define gomp_set_lock_30 omp_set_lock # define gomp_unset_lock_30 omp_unset_lock # define gomp_test_lock_30 omp_test_lock # define gomp_init_nest_lock_30 omp_init_nest_lock # define gomp_destroy_nest_lock_30 omp_destroy_nest_lock # define gomp_set_nest_lock_30 omp_set_nest_lock # define gomp_unset_nest_lock_30 omp_unset_nest_lock # define gomp_test_nest_lock_30 omp_test_nest_lock #endif #ifdef HAVE_ATTRIBUTE_VISIBILITY # define attribute_hidden __attribute__ ((visibility ("hidden"))) #else # define attribute_hidden #endif #ifdef HAVE_ATTRIBUTE_ALIAS # define ialias_ulp ialias_str1(__USER_LABEL_PREFIX__) # define ialias_str1(x) ialias_str2(x) # define ialias_str2(x) #x # define ialias(fn) \ extern __typeof (fn) gomp_ialias_##fn \ __attribute__ ((alias (#fn))) attribute_hidden; # define ialias_redirect(fn) \ extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden; # define ialias_call(fn) gomp_ialias_ ## fn #else # define ialias(fn) # define ialias_redirect(fn) # define ialias_call(fn) fn #endif /* Helper function for priority_node_to_task() and task_to_priority_node(). Return the offset from a task to its priority_node entry. The priority_node entry is has a type of TYPE. */ static inline size_t priority_queue_offset (enum priority_queue_type type) { return offsetof (struct gomp_task, pnode[(int) type]); } /* Return the task associated with a priority NODE of type TYPE. */ static inline struct gomp_task * priority_node_to_task (enum priority_queue_type type, struct priority_node *node) { return (struct gomp_task *) ((char *) node - priority_queue_offset (type)); } /* Return the priority node of type TYPE for a given TASK. 
*/
/* Inverse of priority_node_to_task: offsetof-based pointer arithmetic
   from a task to its embedded pnode[] entry for queue TYPE.  */
static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
		       struct gomp_task *task)
{
  return (struct priority_node *) ((char *) task
				   + priority_queue_offset (type));
}

/* Convert a struct timespec to a nanosecond count.  */
#define NS( ts ) ((ts.tv_sec * 1000000000ULL) + ts.tv_nsec)
/* Nanoseconds elapsed between two struct timespec stamps.  */
#define ELAPSED( start, end ) (NS(end) - NS(start))

/* Time parallel sections & related statistics */
#define _TIME_PARALLEL 1
#if defined _TIME_PARALLEL || defined _TIME_BARRIER
# include <time.h>
# include <debug/log.h>
#endif

/* kmp.c */
extern float popcorn_probe_percent;

/* hierarchy.c */
extern bool popcorn_log_statistics;
extern size_t popcorn_max_probes;
extern const char *popcorn_prime_region;
extern int popcorn_preferred_node;
extern void popcorn_init_workshare_cache(size_t);
extern bool popcorn_distributed ();
extern bool popcorn_finished ();
extern bool popcorn_hybrid_barrier ();
extern bool popcorn_hybrid_reduce ();
extern bool popcorn_het_workshare ();
extern void popcorn_set_distributed (bool);
extern void popcorn_set_finished (bool);
extern void popcorn_set_hybrid_barrier (bool);
extern void popcorn_set_hybrid_reduce (bool);
extern void popcorn_set_het_workshare (bool);
extern void popcorn_get_page_faults (unsigned long long *,
                                     unsigned long long *);
extern void hierarchy_hybrid_barrier_final (int, const char *);

/* Shorthand to select between hierarchical & normal barriers.  When
   Popcorn's hybrid barrier is enabled, waits on the hierarchical
   barrier for this thread's node; otherwise falls back to the
   standard team barrier.  */
static inline void
gomp_team_barrier_wait_final_select (gomp_barrier_t *bar)
{
  struct gomp_thread *thr;
  if (popcorn_hybrid_barrier())
    {
      thr = gomp_thread ();
      hierarchy_hybrid_barrier_final (thr->popcorn_nid, "End parallel");
    }
  else
    gomp_team_barrier_wait_final (bar);
}

/* Same selection for the simple (pool) barrier: use the non-spinning
   wait when the hybrid barrier is enabled.  */
static inline void
gomp_simple_barrier_wait_select (gomp_simple_barrier_t *bar)
{
  // TODO make hierarchical nospin
  if (popcorn_hybrid_barrier())
    gomp_simple_barrier_wait_nospin (bar);
  else
    gomp_simple_barrier_wait (bar);
}

#endif /* LIBGOMP_H */
convolution_sgemm_pack4to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// GEMM over im2col data: computes top_blob = kernel * bottom_im2col (+ bias)
// in fp16 arithmetic, with input packed 4-wide and output packed 8-wide.
// The hot loops are hand-written aarch64 inline assembly; do not restyle.
static void im2col_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const __fp16* bias = _bias;

    // permute: re-tile bottom_im2col so that each GEMM micro-kernel reads
    // its 8 (or 1) output columns contiguously from one tmp channel.
    Mat tmp;
    if (size >= 8)
        tmp.create(8 * maxk, inch, size / 8 + size % 8, 8u, 4, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = 0;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;

            __fp16* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    // transpose 4x8
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(img0),  // %0
                        "=r"(tmpptr)   // %1
                        : "0"(img0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1", "v2", "v3");

                    img0 += size * 4;
                }
            }
        }

        remain_size_start += nn_size << 3;

        // leftover columns (size % 8): one tmp channel per column.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + i % 8);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 4;

                for (int k = 0; k < maxk; k++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #64] \n"
                        "ld1 {v0.4h}, [%0] \n"
                        "st1 {v0.4h}, [%1], #8 \n"
                        : "=r"(img0),  // %0
                        "=r"(tmpptr)   // %1
                        : "0"(img0),
                        "1"(tmpptr)
                        : "memory", "v0");

                    img0 += size * 4;
                }
            }
        }
    }

    // GEMM: for each output channel group (8 channels packed), accumulate
    // over inch*maxk reduction steps.  Accumulators start at the bias.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p * 8 : zeros;

        int i = 0;
        // 8-column micro-kernel: v16..v23 hold 8 output columns x 8 channels.
        for (; i + 7 < size; i += 8)
        {
            __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            asm volatile(
                "ld1 {v16.8h}, [%8] \n"
                "mov v17.16b, v16.16b \n"
                "mov v18.16b, v16.16b \n"
                "mov v19.16b, v16.16b \n"
                "mov v20.16b, v16.16b \n"
                "mov v21.16b, v16.16b \n"
                "mov v22.16b, v16.16b \n"
                "mov v23.16b, v16.16b \n"
                "0: \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
                "prfm pldl1keep, [%3, #512] \n"
                "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                "fmla v16.8h, v8.8h, v0.h[0] \n"
                "fmla v17.8h, v8.8h, v0.h[1] \n"
                "fmla v18.8h, v8.8h, v0.h[2] \n"
                "fmla v19.8h, v8.8h, v0.h[3] \n"
                "fmla v20.8h, v8.8h, v0.h[4] \n"
                "fmla v21.8h, v8.8h, v0.h[5] \n"
                "fmla v22.8h, v8.8h, v0.h[6] \n"
                "fmla v23.8h, v8.8h, v0.h[7] \n"
                "fmla v16.8h, v9.8h, v1.h[0] \n"
                "fmla v17.8h, v9.8h, v1.h[1] \n"
                "fmla v18.8h, v9.8h, v1.h[2] \n"
                "fmla v19.8h, v9.8h, v1.h[3] \n"
                "fmla v20.8h, v9.8h, v1.h[4] \n"
                "fmla v21.8h, v9.8h, v1.h[5] \n"
                "fmla v22.8h, v9.8h, v1.h[6] \n"
                "fmla v23.8h, v9.8h, v1.h[7] \n"
                "fmla v16.8h, v10.8h, v2.h[0] \n"
                "fmla v17.8h, v10.8h, v2.h[1] \n"
                "fmla v18.8h, v10.8h, v2.h[2] \n"
                "fmla v19.8h, v10.8h, v2.h[3] \n"
                "fmla v20.8h, v10.8h, v2.h[4] \n"
                "fmla v21.8h, v10.8h, v2.h[5] \n"
                "fmla v22.8h, v10.8h, v2.h[6] \n"
                "fmla v23.8h, v10.8h, v2.h[7] \n"
                "fmla v16.8h, v11.8h, v3.h[0] \n"
                "fmla v17.8h, v11.8h, v3.h[1] \n"
                "fmla v18.8h, v11.8h, v3.h[2] \n"
                "fmla v19.8h, v11.8h, v3.h[3] \n"
                "fmla v20.8h, v11.8h, v3.h[4] \n"
                "fmla v21.8h, v11.8h, v3.h[5] \n"
                "fmla v22.8h, v11.8h, v3.h[6] \n"
                "fmla v23.8h, v11.8h, v3.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "bne 0b \n"
                "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                : "=r"(nn),    // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr),  // %2
                "=r"(kptr)     // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr),
                "r"(biasptr) // %8
                : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
        }
        // scalar tail: one output column at a time, using fp16 intrinsics.
        for (; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + i % 8);
            const __fp16* kptr = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            float16x8_t _sum0 = vld1q_f16(biasptr);

            int q = 0;
            for (; q < nn; q++)
            {
                float16x4_t _r0 = vld1_f16(tmpptr);

                float16x8_t _k0 = vld1q_f16(kptr);
                float16x8_t _k1 = vld1q_f16(kptr + 8);
                float16x8_t _k2 = vld1q_f16(kptr + 16);
                float16x8_t _k3 = vld1q_f16(kptr + 24);

                _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0);
                _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1);
                _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2);
                _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3);

                kptr += 32;
                tmpptr += 4;
            }

            vst1q_f16(outptr0, _sum0);

            outptr0 += 8;
        }
    }
}

// Re-pack the convolution weights from maxk-inch-outch layout into the
// interleaved layout consumed by the GEMM micro-kernel above, converting
// float -> fp16 on the way.
static void convolution_im2col_sgemm_transform_kernel_pack4to8_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-4a-maxk-inch/4a-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(32 * maxk, inch / 4, outch / 8, (size_t)2u);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        Mat g0 = kernel_tm.channel(q / 8);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            __fp16* g00 = g0.row<__fp16>(p / 4);

            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);

                        g00[0] = (__fp16)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// Entry point: build the im2col matrix for the given kernel geometry and
// strides/dilations, then run the packed GEMM above.
static void convolution_im2col_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
    {
        // gap advances sptr from the end of one output row to the start of
        // the next (in packed-4 elements).
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j + 3 < outw; j += 4)
                        {
                            float16x4_t _val0 = vld1_f16(sptr);
                            float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
                            float16x4_t _val2 = vld1_f16(sptr + stride_w * 8);
                            float16x4_t _val3 = vld1_f16(sptr + stride_w * 12);
                            vst1_f16(ptr, _val0);
                            vst1_f16(ptr + 4, _val1);
                            vst1_f16(ptr + 8, _val2);
                            vst1_f16(ptr + 12, _val3);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            float16x4_t _val0 = vld1_f16(sptr);
                            float16x4_t _val1 = vld1_f16(sptr + stride_w * 4);
                            vst1_f16(ptr, _val0);
                            vst1_f16(ptr + 4, _val1);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }
                        for (; j < outw; j++)
                        {
                            float16x4_t _val = vld1_f16(sptr);
                            vst1_f16(ptr, _val);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
Example_acquire_release_broke.4.c
/* * @@name: acquire_release.4.c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success * @@version: omp_5.0 */ #include <stdio.h> #include <omp.h> int main() { // !!! THIS CODE WILL FAIL TO PRODUCE CONSISTENT RESULTS !!!!!!! // !!! DO NOT PROGRAM SYNCHRONIZATION THIS WAY !!!!!!! int x = 0, y; #pragma omp parallel num_threads(2) { int thrd = omp_get_thread_num(); if (thrd == 0) { #pragma omp critical { x = 10; } // an explicit flush directive that provides // release semantics is needed here // to complete the synchronization. #pragma omp atomic write y = 1; } else { int tmp = 0; while (tmp == 0) { #pragma omp atomic read acquire // or seq_cst tmp = y; } #pragma omp critical { printf("x = %d\n", x); } // !! NOT ALWAYS 10 } } return 0; }
omp_bug6.c
/******************************************************************************
* FILE: omp_bug6.c
* DESCRIPTION:
*   This program compiles and runs fine, but produces the wrong result.
*   Compare to omp_orphan.c.
* AUTHOR: Blaise Barney  6/05
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100

float a[VECLEN], b[VECLEN];

/* Dot product of the global arrays a and b via an orphaned "omp for".
 * BUG (intentional -- this is a teaching example): "sum" is a *local*
 * variable here, so the reduction accumulates into a private copy that is
 * discarded when the function returns; and although the function is
 * declared to return float, there is no return statement (undefined
 * behavior if the caller used the result).  The orphaned worksharing loop
 * binds to the parallel region created in main(). */
float dotprod ()
{
int i,tid;
float sum;

tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
  for (i=0; i < VECLEN; i++)
    {
    sum = sum + (a[i]*b[i]);
    printf(" tid= %d i=%d\n",tid,i);
    }
}


int main (int argc, char *argv[])  {
int i;
float sum;

for (i=0; i < VECLEN; i++)
  a[i] = b[i] = 1.0 * i;
sum = 0.0;

/* BUG (intentional): main's "sum" is shared with the region, but
 * dotprod() never touches it -- it reduces into its own local -- so the
 * value printed below stays 0.0 instead of the true dot product. */
#pragma omp parallel shared(sum)
  dotprod();

printf("Sum = %f\n",sum);
}
timer.h
#ifndef SPLATT_TIMER_H #define SPLATT_TIMER_H /****************************************************************************** * INCLUDES *****************************************************************************/ #include <time.h> #include <stddef.h> #include <stdbool.h> #ifdef __MACH__ #include <mach/mach.h> #include <mach/mach_time.h> #endif /****************************************************************************** * STRUCTURES *****************************************************************************/ /** * @brief Represents a wall-clock timer. */ typedef struct { bool running; double seconds; double start; double stop; } sp_timer_t; /** * @brief timer_id provides easy indexing into timers[]. */ typedef enum { TIMER_LVL0, /* LEVEL 0 */ TIMER_ALL, TIMER_CPD, TIMER_REORDER, TIMER_CONVERT, TIMER_LVL1, /* LEVEL 1 */ TIMER_MTTKRP, TIMER_INV, TIMER_FIT, TIMER_MATMUL, TIMER_ATA, TIMER_MATNORM, TIMER_IO, TIMER_PART, TIMER_LVL2, /* LEVEL 2 */ #ifdef SPLATT_USE_MPI TIMER_MPI, TIMER_MPI_IDLE, TIMER_MPI_COMM, TIMER_MPI_ATA, TIMER_MPI_REDUCE, TIMER_MPI_PARTIALS, TIMER_MPI_NORM, TIMER_MPI_UPDATE, TIMER_MPI_FIT, /* timer max */ TIMER_MTTKRP_MAX, TIMER_MPI_MAX, TIMER_MPI_IDLE_MAX, TIMER_MPI_COMM_MAX, #endif TIMER_SPLATT, TIMER_GIGA, TIMER_DFACTO, TIMER_TTBOX, TIMER_SORT, TIMER_TILE, TIMER_MISC, TIMER_NTIMERS /* LEVEL N */ } timer_id; /* globals */ int timer_lvl; sp_timer_t timers[TIMER_NTIMERS]; /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ #define init_timers splatt_init_timers /** * @brief Call timer_reset() on all of timers[]. */ void init_timers(void); #define report_times splatt_report_times /** * @brief Output a summary of all used timers. 
*/ void report_times(void); #define timer_inc_verbose splatt_timer_inc_verbose /** * @brief Increment timer verbosity to the next level; */ void timer_inc_verbose(void); /** * @brief Return the number of seconds since an unspecified time (e.g., Unix * epoch). This is accomplished with a high-resolution monotonic timer, * suitable for performance timing. * * @return The number of seconds. */ static inline double monotonic_seconds() { #ifdef __MACH__ /* OSX */ static mach_timebase_info_data_t info; static double seconds_per_unit; if(seconds_per_unit == 0) { #pragma omp critical { mach_timebase_info(&info); seconds_per_unit = (info.numer / info.denom) / 1e9; } } return seconds_per_unit * mach_absolute_time(); #else /* Linux systems */ struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec + ts.tv_nsec * 1e-9; #endif } /** * @brief Reset all fields of a sp_timer_t. * * @param timer The timer to reset. */ static inline void timer_reset(sp_timer_t * const timer) { timer->running = false; timer->seconds = 0; timer->start = 0; timer->stop = 0; } /** * @brief Start a sp_timer_t. NOTE: this does not reset the timer. * * @param timer The timer to start. */ static inline void timer_start(sp_timer_t * const timer) { if(!timer->running) { timer->running = true; timer->start = monotonic_seconds(); } } /** * @brief Stop a sp_timer_t and update its time. * * @param timer The timer to stop. */ static inline void timer_stop(sp_timer_t * const timer) { timer->running = false; timer->stop = monotonic_seconds(); timer->seconds += timer->stop - timer->start; } /** * @brief Give a sp_timer_t a fresh start by resetting and starting it. * * @param timer The timer to refresh. */ static inline void timer_fstart(sp_timer_t * const timer) { timer_reset(timer); timer_start(timer); } #endif
GB_unaryop__identity_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): to change behavior, edit the code generator, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint32_int16
// op(A') function:  GB_tran__identity_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise identity with int16_t -> uint32_t typecast; Cx and Ax may alias.
GrB_Info GB_unop__identity_uint32_int16
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above.
GrB_Info GB_tran__identity_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
maxwell_zeroBC.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_sstruct_ls.h"

/* Zero the boundary-condition entries of a parallel vector: forwards the
 * given local row indices to the sequential kernel on the local part. */
HYPRE_Int
hypre_ParVectorZeroBCValues(hypre_ParVector *v,
                            HYPRE_Int       *rows,
                            HYPRE_Int        nrows)
{
   hypre_SeqVectorZeroBCValues(hypre_ParVectorLocalVector(v), rows, nrows);

   return 0;
}

/* Set v[rows[i]] = 0 for each of the nrows listed entries. */
HYPRE_Int
hypre_SeqVectorZeroBCValues(hypre_Vector *v,
                            HYPRE_Int    *rows,
                            HYPRE_Int     nrows)
{
   HYPRE_Real *data = hypre_VectorData(v);
   HYPRE_Int   i;

#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nrows; i++)
   {
      data[rows[i]] = 0.0;
   }

   return 0;
}
GB_unop__trunc_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): to change behavior, edit the code generator, not this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__trunc_fc64_fc64
// op(A') function:  GB_unop_tran__trunc_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_ctrunc (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_ctrunc (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_ctrunc (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TRUNC || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise complex truncation (GB_ctrunc) of a double-complex array.
// Cx and Ax may alias; Ab selects entries in the bitmap case.
GrB_Info GB_unop_apply__trunc_fc64_fc64
(
    GxB_FC64_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, expanded with the
// GB_* macros defined above.
GrB_Info GB_unop_tran__trunc_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB103-master-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A master directive is used to protect memory accesses. */

#include <omp.h>
#include <stdio.h>

/* DataRaceBench DRB103 ("no" data race by construction): the shared
 * variable k is written and printed only by the master thread, inside the
 * same structured block, so no two threads ever access k concurrently.
 * Note `omp master` has no implied barrier at its end; that is safe here
 * only because no other thread touches k. */
int main()
{
  int k;
#pragma omp parallel
  {
#pragma omp master
    {
      k = omp_get_num_threads();
      printf ("Number of Threads requested = %i\n",k);
    }
  }
  return 0;
}
mat_mul_p4a_8000.c
/*
 * file for mat_mul.c
 */

#include "./mat_mul.h"
#include "./size.h"

void mat_mul(int *a, int *b, int *c);

/* 8000x8000 integer matrix product (Par4All auto-parallelized output); each
 * OpenMP thread computes a band of output rows.
 *
 * NOTE(review): b is indexed as b[j*8000+k], i.e. walked along the row for
 * output column j -- this computes c = a * transpose(b) relative to the
 * textbook c[i][j] = sum_k a[i][k]*b[k][j].  Presumably b is stored
 * transposed for cache-friendly access; TODO confirm against the caller.
 *
 * NOTE(review): the innermost t loop re-adds the identical product 100
 * times, scaling every c entry by 100.  This looks like deliberate
 * work amplification for benchmarking the generated code; confirm it is
 * intentional before reusing this routine for real computation. */
void mat_mul(int *a, int *b, int *c)
{
   int i, j, k, t;
#pragma omp parallel for private(j, t, k)
   for(i = 0; i <= 7999; i += 1)
      for(j = 0; j <= 7999; j += 1) {
         c[i*8000+j] = 0;
         for(k = 0; k <= 7999; k += 1)
            for(t = 0; t <= 99; t += 1)
               c[i*8000+j] += a[i*8000+k]*b[j*8000+k];
      }
   return;
}
bli_axpyv_bgq_int.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas at Austin

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name(s) of the copyright holder(s) nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

/* y := y + alpha * x for double-precision unit-stride vectors, using the
 * Blue Gene/Q QPX 4-wide vector intrinsics.  Falls back to the reference
 * kernel when either stride is non-unit or either buffer is not 32-byte
 * aligned (vec_lda/vec_sta require alignment).  conjx is ignored for real
 * data. */
void bli_daxpyv_bgq_int
     (
       conj_t           conjx,
       dim_t            n,
       double* restrict alpha,
       double* restrict x, inc_t incx,
       double* restrict y, inc_t incy,
       cntx_t*          cntx
     )
{
	if ( bli_zero_dim1( n ) ) return;

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	bool use_ref = FALSE;
	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( ( siz_t )x, 32 ) ||
	     bli_is_unaligned_to( ( siz_t )y, 32 ) )
	{
		use_ref = TRUE;
	}

	// Call the reference implementation if needed.
	if ( use_ref == TRUE )
	{
		BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx );
		return;
	}

	dim_t n_run  = n / 4;   // number of full 4-element vector iterations
	dim_t n_left = n % 4;   // scalar cleanup elements

	// alpha is loop-invariant; splat it once before the parallel loop.
	vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );

	// BUG FIX: the vector temporaries xv, yv and zv used to be declared
	// here, *outside* the omp parallel for, which made them shared among
	// all threads and introduced a data race (threads overwrote each
	// other's loads before the store, corrupting results).  They are now
	// declared inside the loop body, so each iteration -- and therefore
	// each thread -- has its own private copies.
	#pragma omp parallel for
	for ( dim_t i = 0; i < n_run; i++ )
	{
		vector4double xv = vec_lda( 0 * sizeof(double), &x[i*4] );
		vector4double yv = vec_lda( 0 * sizeof(double), &y[i*4] );
		vector4double zv = vec_madd( alphav, xv, yv );
		vec_sta( zv, 0 * sizeof(double), &y[i*4] );
	}

	// Scalar cleanup for the trailing n % 4 elements.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		y[4*n_run + i] += *alpha * x[4*n_run + i];
	}
}
Interp1PrimThirdOrderMUSCLChar.c
/*! @file Interp1PrimThirdOrderMUSCLChar.c @author Debojyoti Ghosh @brief Characteristic-based 3rd-order MUSCL scheme with Koren's limiter */ #include <stdio.h> #include <stdlib.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 3rd order MUSCL scheme with Koren's limiter (characteristic-based) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the 3rd order MUSCL scheme with Koren's limiter on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. 
This function computes numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as: using the 3rd order MUSCL scheme with Koren's limiter as follows: \f{equation}{ \hat{\alpha}^k_{j+1/2} = {\alpha}^k_{j-1} + \phi \left[\frac{1}{3}\left({\alpha}^k_j-{\alpha}^k_{j-1}\right) + \frac{1}{6}\left({\alpha}^k_{j-1}-{\alpha}^k_{j-2}\right)\right] \f} where \f{equation}{ \phi = \frac {3\left({\alpha}^k_j-{\alpha}^k_{j-1}\right)\left({\alpha}^k_{j-1}-{\alpha}^k_{j-2}\right) + \epsilon} {2\left[\left({\alpha}^k_j-{\alpha}^k_{j-1}\right)-\left({\alpha}^k_{j-1}-{\alpha}^k_{j-2}\right)\right]^2 + 3\left({\alpha}^k_j-{\alpha}^k_{j-1}\right)\left({\alpha}^k_{j-1}-{\alpha}^k_{j-2}\right) + \epsilon}, \f} \f$\epsilon\f$ is a small constant (typically \f$10^{-3}\f$), and \f{equation}{ \alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n \f} is the \f$k\f$-th characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and \f$n\f$ is #HyPar::nvars. The final interpolated function is computed from the interpolated characteristic quantities as: \f{equation}{ \hat{\bf f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k \f} \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The left and right eigenvectors are computed at an averaged quantity at j+1/2. Thus, this function requires functions to compute the average state, and the left and right eigenvectors. These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::GetRightEigenvectors() - #HyPar::AveragingFunction() If these functions are not provided by the physical model, then a characteristic-based interpolation cannot be used. 
+ The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. 
  For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size
  (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from
  0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1,
  and so forth.

  upw   | int   | Upwinding direction: if positive, a left-biased interpolant will be computed;
                  if negative, a right-biased interpolant will be computed. If the interpolation
                  method is central, then this has no effect.
  dir   | int   | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D;
                  0, 1 or 2 for 3D)
  s     | void* | Solver object of type #HyPar: the following variables are needed -
                  #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local.
  m     | void* | MPI object of type #MPIVariables: this is needed only by compact
                  interpolation methods that need to solve a global implicit system across
                  MPI ranks.
  uflag | int   | A flag indicating if the function being interpolated \f${\bf f}\f$ is the
                  solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$).

  Reference:
  + van Leer, B., Towards the Ultimate Conservative Difference Scheme. 2: Monotonicity and
    Conservation Combined in a Second-Order Scheme, J. of Comput. Phys., 14 (4), 1974,
    pp.361-370, http://dx.doi.org/10.1016/0021-9991(74)90019-9
  + Koren, B., A Robust Upwind Discretization Method for Advection, Diffusion and Source
    Terms, Centrum voor Wiskunde en Informatica, Amsterdam, 1993
*/
int Interp1PrimThirdOrderMUSCLChar(
                                    double *fI,    /*!< Array of interpolated function values at the interfaces */
                                    double *fC,    /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
                                    double *u,     /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
                                    double *x,     /*!< Grid coordinates */
                                    int    upw,    /*!< Upwind direction (left or right biased) */
                                    int    dir,    /*!< Spatial dimension along which to interpolate */
                                    void   *s,     /*!< Object of type #HyPar containing solver-related variables */
                                    void   *m,     /*!< Object of type #MPIVariables containing MPI-related variables */
                                    int    uflag   /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */
                                  )
{
  HyPar           *solver = (HyPar*) s;
  MUSCLParameters *muscl  = (MUSCLParameters*) solver->interp;
  int             i, k, v;
  _DECLARE_IERR_;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;

  /* define some constants */
  double one_third = 1.0/3.0;
  double one_sixth = 1.0/6.0;

  /* create index and bounds for the outer loop, i.e., to loop over
     all 1D lines along dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;    /* one sweep per 1D line along dir */
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;   /* number of interfaces = cells + 1 along dir */
  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);

  /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */
  /* NOTE: these are VLAs shared through the OpenMP private() clauses below */
  double R[nvars*nvars], L[nvars*nvars], uavg[nvars], fchar[nvars];

  if (upw > 0) {
    /* left-biased (upwind) interpolant: stencil {i-2, i-1, i} relative to interface i-1/2 */
#pragma omp parallel for schedule(auto) default(shared) private(i,k,v,R,L,uavg,fchar,index_outer,indexC,indexI)
    for (i=0; i<N_outer; i++) {
      _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0);
      _ArrayCopy1D_(index_outer,indexC,ndims);
      _ArrayCopy1D_(index_outer,indexI,ndims);
      for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
        /* 1D indices of the stencil grid points */
        int qm1,qm2,qp1;
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        int p; /* 1D index of the interface */
        _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
        /* find averaged state at this interface */
        IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr);
        /* Get the left and right eigenvectors */
        IERR solver->GetLeftEigenvectors  (uavg,L,solver->physics,dir); CHECKERR(ierr);
        IERR solver->GetRightEigenvectors (uavg,R,solver->physics,dir); CHECKERR(ierr);
        /* For each characteristic field */
        for (v = 0; v < nvars; v++) {
          /* calculate the characteristic flux components along this characteristic
             (project the conserved-space flux onto the v-th left eigenvector) */
          double m2, m1, p1;
          m2 = m1 = p1 = 0;
          for (k = 0; k < nvars; k++) {
            m2 += L[v*nvars+k] * fC[qm2*nvars+k];
            m1 += L[v*nvars+k] * fC[qm1*nvars+k];
            p1 += L[v*nvars+k] * fC[qp1*nvars+k];
          }
          /* Koren-type smooth limiter; muscl->eps avoids 0/0 in smooth regions */
          double fdiff = p1 - m1;
          double bdiff = m1 - m2;
          double limit = (3*fdiff*bdiff + muscl->eps)
                       / (2*(fdiff-bdiff)*(fdiff-bdiff) + 3*fdiff*bdiff + muscl->eps);
          fchar[v] = m1 + limit * (one_third*fdiff + one_sixth*bdiff);
        }
        /* calculate the interface u from the characteristic u */
        IERR MatVecMult(nvars,(fI+nvars*p),R,fchar); CHECKERR(ierr);
      }
    }
  } else {
    /* right-biased interpolant: mirrored stencil {i-1, i, i+1} relative to interface i-1/2 */
#pragma omp parallel for schedule(auto) default(shared) private(i,k,v,R,L,uavg,fchar,index_outer,indexC,indexI)
    for (i=0; i<N_outer; i++) {
      _ArrayIndexnD_(ndims,i,bounds_outer,index_outer,0);
      _ArrayCopy1D_(index_outer,indexC,ndims);
      _ArrayCopy1D_(index_outer,indexI,ndims);
      for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
        /* 1D indices of the stencil grid points */
        int qm1,qp1,qp2;
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
        int p; /* 1D index of the interface */
        _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
        /* find averaged state at this interface */
        IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr);
        /* Get the left and right eigenvectors */
        IERR solver->GetLeftEigenvectors  (uavg,L,solver->physics,dir); CHECKERR(ierr);
        IERR solver->GetRightEigenvectors (uavg,R,solver->physics,dir); CHECKERR(ierr);
        /* For each characteristic field */
        for (v = 0; v < nvars; v++) {
          /* calculate the characteristic flux components along this characteristic */
          double m1, p1, p2;
          m1 = p1 = p2 = 0;
          for (k = 0; k < nvars; k++) {
            m1 += L[v*nvars+k] * fC[qm1*nvars+k];
            p1 += L[v*nvars+k] * fC[qp1*nvars+k];
            p2 += L[v*nvars+k] * fC[qp2*nvars+k];
          }
          /* mirrored differences and limiter for the right-biased reconstruction */
          double fdiff = p2 - p1;
          double bdiff = p1 - m1;
          double limit = (3*fdiff*bdiff + muscl->eps)
                       / (2*(fdiff-bdiff)*(fdiff-bdiff) + 3*fdiff*bdiff + muscl->eps);
          fchar[v] = p1 - limit * (one_third*fdiff + one_sixth*bdiff);
        }
        /* calculate the interface u from the characteristic u */
        IERR MatVecMult(nvars,(fI+nvars*p),R,fchar); CHECKERR(ierr);
      }
    }
  }

  return(0);
}
2-2t.c
#include <stdio.h>
#include <omp.h>

/* Launch a fixed-size OpenMP parallel region: every thread prints a greeting,
 * then (after the implicit barrier) the initial thread prints a farewell banner. */
int main(void)
{
    enum { kThreadCount = 2 };      /* requested team size */

    omp_set_num_threads(kThreadCount);

#pragma omp parallel
    {
        printf(" Hello ");
    }

    printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n");
    return 0;
}
cg_aux.h
//MIT License // //Copyright (c) 2018 Sicong Zhuang // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. 
/* cg_aux.h -- auxiliary kernels for a blocked conjugate-gradient solver:
 * Harwell-Boeing (CSR) matrix structures, BLAS/sparse-BLAS macro dispatch
 * (Intel MKL or a manual fallback), and blocked vector/solve helpers that
 * spawn task-parallel work via `#pragma omp task` with dependency clauses
 * (NOTE(review): the in/out/concurrent/label/priority clause syntax appears
 * to be OmpSs rather than standard OpenMP -- confirm against the build). */
#ifndef __CG_AUX_H__
#define __CG_AUX_H__

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>

#include "vector.h"
#include "csparse.h"
#include "hb_io.h"

/* Floating-point primitive aliases (double-precision build). */
#define FP_SQRT sqrt
#define FP_RAND drand48
#define FP_SEED srand48
#define FP_ABS fabs
#define FP_EXP frexp
#define FP_LOG10 log10
#define FP_POW pow
#define FP_SCANSPEC scan_dconspec

/* BLAS / sparse-BLAS backend selection: MKL uses mkl_dcsrmv directly,
 * the LAPACK/CBLAS build falls back to the manual_csrmv declared below. */
#ifdef INTEL_MKL
#include "mkl.h"
#define BLAS_cp(n, dx, incx, dy, incy) cblas_dcopy(n, dx, incx, dy, incy)
#define BLAS_dot(n, dx, incx, dy, incy) cblas_ddot(n, dx, incx, dy, incy)
#define BLAS_axpy(n, da, dx, incx, dy, incy) cblas_daxpy(n, da, dx, incx, dy, incy)
#define SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, beta, Cptr) \
	mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, &beta, Cptr)
#elif defined LAPACK
#include "cblas.h"
#define BLAS_cp(n, dx, incx, dy, incy) cblas_dcopy(n, dx, incx, dy, incy)
#define BLAS_dot(n, dx, incx, dy, incy) cblas_ddot(n, dx, incx, dy, incy)
#define BLAS_axpy(n, da, dx, incx, dy, incy) cblas_daxpy(n, da, dx, incx, dy, incy)
#define SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr1, Bptr, beta, Cptr) \
	manual_csrmv(trans, m, n, alpha, avval, avpos, avptr, Bptr, beta, Cptr)
#endif

/* Harwell-Boeing (compressed sparse) matrix; also doubles as a hyper-matrix
 * (blocked matrix of matrices) node when vval points to hbmat_t* entries. */
typedef struct strhbmat {
	int m, n;                  /* matrix dimensions                          */
	int elemc;                 /* number of stored elements                  */
	int *vptr;                 /* column/row pointer array (CSR/CSC)         */
	int *vpos;                 /* element index array                        */
	void *vval;                /* element values (double* or hbmat_t**)      */
	int *vdiag;                /* diagonal element positions                 */
	int *udiagc;
	int b;                     /* block size                                 */
	int type;
	struct strhbmat *orig;     /* back-pointer to the source matrix          */
	struct strhbmat *trans;    /* transposed view                            */
	struct strhbmat *hyper;    /* blocked (hyper-matrix) view                */
	int orig_row;
	int orig_col;
	int *e_tree;               /* elimination tree                           */
	int FACT;                  /* factorization state flag                   */
	/*
	 * The following for hyper-matrix only
	 */
	int *vptr_pool;
	int *vpos_pool;
	void *vval_pool;
	int vptr_unit;
	int vpos_unit;
	int vval_unit;
	int vptr_pp;
	int vpos_pp;
	int vval_pp;
	// pthread_mutex_t* mtx;
} hbmat_t;

extern const char *scan_dconspec;
extern const char *scan_sconspec;

/* Harwell-Boeing I/O and conversion helpers (implemented elsewhere). */
void hb_read_double(char *input_file, int *m, int *n, int *elemc, int **vptr, int **vpos, double **vval);
void hb_reset(hbmat_t *A);
void one2zero(hbmat_t* in_matrix);
void hb_sym_expand(hbmat_t *A, hbmat_t *B);
void hb_init_basic(hbmat_t *A, hbmat_t *B);
void hb_free(hbmat_t *A);
void* __hb2hbh_block(int I, int J, hbmat_t *A, int b, hbmat_t *Bp) ;
hbmat_t* hb2hbh(hbmat_t *A, int b, int is_csr);
void hb_sym_diag_block(hbmat_t *src_mat, int bsze, hbmat_t *diagb);
int read_mm2dense(FILE *f, int m, int n, double *A);
void print_dense2mm(FILE *f, const char *name, int m, int n, const double *A, int lda);
void fprint_dense2mm(const char *fname, const char *name, int m, int n, const double *A, int lda);

/* Blocked copy Y <- X over an m x n column-major array, tiled bm x bn;
 * each tile is delegated to the __t_copy task. */
static inline void __attribute__((always_inline)) bblas_dcopy(int p, int bm, int bn, int m, int n, double *X, double *Y) {
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;   /* clip tile height at the boundary */
		int j;
		for ( j=0; j<n; j+=bn ) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;   /* clip tile width at the boundary */
			__t_copy(p, c, d, m, n, X, Y, j*m+i, j*m+i);
//			__t_copy(p, c, d, m, n, &X[j*m+i], &Y[j*m+i]);
		}
	}
}

/* Hyper-matrix sparse mat-vec C <- alpha*A*B + beta*C: each stored block of
 * the hyper-matrix A spawns one CSR mat-vec task. Only the first block of a
 * block-row applies beta; subsequent blocks accumulate with coefficient 1. */
static inline void __attribute__((always_inline)) hbsblas_dcsrmv(int p, int b, double alpha, hbmat_t *Ahbh, double *B, double beta, double *C) {
	int M = Ahbh->m;
	int N = Ahbh->n;
	int *vptr = Ahbh->vptr;
	int *vpos = Ahbh->vpos;
	hbmat_t **vval = Ahbh->vval;
	int offs = vptr[0] == 0 ? 0 : 1; //Detect zero/one based
	int cmaj = 1;
	/* NOTE(review): offs, cmaj and N are computed but not used below. */
	char *trans = "N";
	char *matdescra = "GLNC";
	double fp_one = 1.0;

	int I;
	for ( I = 0; I < M; ++I ) {
		double *Cptr = &C[I*b];
		int first = 1;
		int J;
		for ( J = vptr[I]; J < vptr[I+1]; J++ ) {
			hbmat_t *A = vval[J];
			int icol = vpos[J];
			double *Bptr = &B[icol*b];
			double *avval = A->vval;
			int *avpos = A->vpos;
			int *avptr = A->vptr;
			int m = A->m;
			int n = A->n;
			if ( first ) {
				/* first block in the row: scale C by beta */
				#pragma omp task in(B[icol*b:icol*b+n-1]) out(C[I*b:I*b+m-1]) no_copy_deps label(csrmv_hbh) priority(p)
				SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, beta, Cptr);
//				mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, &beta, Cptr);
				first = 0;
			} else {
				/* remaining blocks accumulate into C */
				#pragma omp task in(B[icol*b:icol*b+n-1]) out(C[I*b:I*b+m-1]) no_copy_deps label(csrmv_hbh) priority(p)
//				#pragma omp task in(B[icol*b;n]) out(C[I*b;m]) no_copy_deps label(csrmv_hbh) priority(p)
				SBLAS_csrmv(trans, m, n, alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, fp_one, Cptr);
//				mkl_dcsrmv(trans, &m, &n, &alpha, matdescra, avval, avpos, avptr, avptr+1, Bptr, &fp_one, Cptr);
			}
		}
	}
}

/* Blocked Cholesky solve x <- A\B using per-block factors S/N; one task per
 * diagonal block of size b (last block may be smaller). */
static inline void __attribute__((always_inline)) bsblas_dcholsolv2(int p, int b, int m, css **S, csn **N, double *B, double *x) {
	int idx;
	int i;
	for ( i = 0, idx = 0; i < m; i+=b, idx++) {
		int bs = b < m-i ? b : m-i;
		css *sptr = S[idx];
		csn *nptr = N[idx];
		double *bptr = &B[i];
		double *xptr = &x[i];
		#pragma omp task in(B[i:i+bs-1]) out(x[i:i+bs-1]) label(dcholsolv2)
		cs_cholsol2(bs, sptr, nptr, bptr, xptr);
	}
}

/* Sequential variant of bsblas_dcholsolv2 (no tasks spawned). */
static inline void __attribute__((always_inline)) bsblas_dcholsolv2_seq(int p, int b, int m, css **S, csn **N, double *B, double *x) {
	int idx;
	int i;
	for ( i = 0, idx = 0; i < m; i+=b, idx++) {
		int bs = b < m-i ? b : m-i;
		css *sptr = S[idx];
		csn *nptr = N[idx];
		double *bptr = &B[i];
		double *xptr = &x[i];
		cs_cholsol2(bs, sptr, nptr, bptr, xptr);
	}
}

/* Whole-vector Cholesky solve as a single task. */
static inline void __attribute__((always_inline)) dcholsolv2_blk(int p, int m, css *S, csn *N, double *B, double *x) {
	#pragma omp task in(B[0:m-1]) out(x[0:m-1]) label(dcholsolv2_blk)
	cs_cholsol2(m, S, N, B, x);
}

/* Nested-task Cholesky solve: one outer task over the whole vector that
 * spawns an inner (dependency-free) task per block and waits for them. */
static inline void __attribute__((always_inline)) dcholsolv2_nested(int p, int b, int m, css **S, csn **N, double *B, double *x) {
	#pragma omp task in(B[0:m-1]) out(x[0:m-1]) label(dcholsolv2_nested)
	{
		int idx;
		int i;
		for ( i = 0, idx = 0; i < m; i+=b, idx++) {
			int bs = b < m-i ? b : m-i;
			css *sptr = S[idx];
			csn *nptr = N[idx];
			double *bptr = &B[i];
			double *xptr = &x[i];
			#pragma omp task label(dcholsolv2_in) //in([bs]bptr) out([bs]xptr) label(dcholsolv2_in)
			cs_cholsol2(bs, sptr, nptr, bptr, xptr);
		}
		#pragma omp taskwait
	}
}

/* Blocked dot product: per tile, __t_dot accumulates into result; result
 * advances by bn after each tile column. */
static inline void __attribute__((always_inline)) bblas_ddot(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result) {
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			__t_dot(p, c, d, m, n, X, Y, j*m+i, j*m+i, result);
//			__t_dot(p, c, d, m, n, &X[j*m+i], &Y[j*m+i], result);
		}
		result += bn;
	}
}

/* Blocked combined copy-axpy: delegates each tile to __t_cpaxpy_comb with a
 * per-column scalar expressed as the ratio Anum/Aden. */
static inline void __attribute__((always_inline)) bblas_dcpaxpy_comb(int bm, int bn, int m, int n, double alpha, double *Anum, double *Aden, double *X1, double *X2, double *Y1, double *Y2, double *Z1, double *Z2) {
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;
		int j;
		for ( j=0; j<n; j+=bn ) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;
			__t_cpaxpy_comb(c, d, m, n, alpha, &Anum[j], &Aden[j], &X1[j*m+i], &X2[j*m+i], &Y1[j*m+i], &Y2[j*m+i], &Z1[j*m+i], &Z2[j*m+i]);
		}
	}
}

/* Blocked extended axpy Z <- f(SAnum/SAden, X, Y); tile work in __t_extm_axpy. */
static inline void __attribute__((always_inline)) bblas_extm_daxpy(int p, int bm, int bn, int m, int n, double *SAnum, double *SAden, double *X, double *Y, double *Z) {
	int i;
	for ( i=0; i<m; i+=bm ) {
		int cs = m - i;
		int c = cs < bm ? cs : bm;
		int j;
		for ( j=0; j<n; j+=bn) {
			int ds = n - j;
			int d = ds < bn ? ds : bn;
			__t_extm_axpy(c, d, m, n, &SAnum[j], &SAden[j], &X[j*m+i], &Y[j*m+i], &Z[j*m+i], p);
		}
	}
}

/* Fused double dot product: computes X.Y and A.B per tile in one task
 * (_cg_dot2), halving task overhead versus two bblas_ddot calls. */
static inline __attribute__((always_inline)) void cg_ddot2(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result, double *A, double *B, double *result2) {
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			_cg_dot2(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, A, B, j*m+i, j*m+i, result2);
		}
		result += bn;
		result2 += bn;
	}
}

/* Blocked dot product with per-block partial results (indexed by idx)
 * instead of a single accumulator. */
static inline void __attribute__((always_inline)) bblas_ddot_array(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result) {
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for ( i=0, idx=0; i<m; i+=bm, ++idx ) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			__t_dot_array(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, idx);
		}
		result += bn;
	}
}

/* Fused double dot product with per-block partial results. */
static inline __attribute__((always_inline)) void cg_ddot2_array(int p, int bm, int bn, int m, int n, double *X, double *Y, double *result, double *A, double *B, double *result2) {
	int j;
	for ( j=0; j<n; j+=bn ) {
		int ds = n - j;
		int d = ds < bn ? ds : bn;
		int idx;
		int i;
		for (i=0, idx=0; i<m; i+=bm, idx++) {
			int cs = m - i;
			int c = cs < bm ? cs : bm;
			_cg_dot2_array(p, c, d, m, n, X, Y, j*m+i, j*m+i, result, idx, A, B, j*m+i, j*m+i, result2, idx);
		}
		result += bn;
		result2 += bn;
	}
}

/* Task-annotated kernel prototypes (implemented elsewhere): the pragma on
 * each declaration outlines the function itself as a task with the given
 * dependency clauses. */
#pragma omp task in(X[initx:initx+bm-1]) out(Y[inity:inity+bm-1]) priority(p) label(dcopy) no_copy_deps
void __t_copy(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1]) concurrent(result[0:bn-1]) no_copy_deps priority(p) label(ddot)
void __t_dot(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1]) concurrent(result[0:(m+bm-1)/bm-1]) no_copy_deps priority(p) label(ddot_array)
void __t_dot_array(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, int initr);

#pragma omp task in(X1[0:bm-1], X2[0:bm-1], Anum[0:bn-1], Aden[0:bn-1], Y1[0:bm-1], Y2[0:bm-1]) out(Z1[0:bm-1], Z2[0:bm-1]) no_copy_deps priority(1) label(dcpaxpy_comb)
void __t_cpaxpy_comb(int bm, int bn, int m, int n, double alpha, double *Anum, double *Aden, double *X1, double *X2, double *Y1, double *Y2, double *Z1, double *Z2);

#pragma omp task in(X[0:bm-1], Y[0:bm-1], SAnum[0:bn-1], SAden[0:bn-1]) out(Z[0:bm-1]) no_copy_deps priority(p) label(extm_axpy)
void __t_extm_axpy(int bm, int bn, int m, int n, double *SAnum, double *SAden, double *X, double *Y, double *Z, int p);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1], A[inita:inita+bm-1], B[initb:initb+bm-1]) concurrent([bn]result, [bn]result2) no_copy_deps priority(p) label(cg_dot2)
void _cg_dot2(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, double *A, double *B, int inita, int initb, double *result2);

#pragma omp task in(X[initx:initx+bm-1], Y[inity:inity+bm-1], A[inita:inita+bm-1], B[initb:initb+bm-1]) concurrent(result[0:(m+bm-1)/bm-1], result2[0:(m+bm-1)/bm-1]) no_copy_deps priority(p) label(ddot2_array)
void _cg_dot2_array(int p, int bm, int bn, int m, int n, double *X, double *Y, int initx, int inity, double *result, int initr, double *A, double *B, int inita, int initb, double *result2, int initr2);

/* Portable CSR mat-vec used by the non-MKL SBLAS_csrmv macro. */
void manual_csrmv(char *trans, int m, int n, double alpha, double *avval, int *avpos, int *avptr, double *Bptr, double beta, double *Cptr);

#endif //__CG_AUX_H__
_uniform_ld.c
/* The batman package: fast computation of exoplanet transit light curves * Copyright (C) 2015 Laura Kreidberg * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> #include "numpy/arrayobject.h" #if defined (_OPENACC) && defined(__PGI) # include <accelmath.h> #else # include <math.h> #endif #if defined (_OPENMP) && !defined(_OPENACC) # include <omp.h> #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif static PyObject *_uniform_ld(PyObject *self, PyObject *args) { int nthreads; double p; PyArrayObject *ds, *flux; npy_intp dims[1]; if(!PyArg_ParseTuple(args, "Odi", &ds, &p, &nthreads)) return NULL; //parses function input dims[0] = PyArray_DIMS(ds)[0]; flux = (PyArrayObject *) PyArray_SimpleNew(1, dims, PyArray_TYPE(ds)); //creates numpy array to store return flux values double *f_array = PyArray_DATA(flux); double *d_array = PyArray_DATA(ds); if(fabs(p - 0.5) < 1.e-3) p = 0.5; #if defined (_OPENMP) && !defined(_OPENACC) omp_set_num_threads(nthreads); //specifies number of threads (if OpenMP is supported) #endif #if defined (_OPENACC) #pragma acc parallel loop copyout(f_array[:dims[0]]) #elif defined (_OPENMP) #pragma omp parallel for #endif for(int i=0; i<dims[0]; i++) { double d = d_array[i]; // separation of centers if(d >= 1. + p) f_array[i] = 1.; //no overlap if(p >= 1. && d <= p - 1.) 
f_array[i] = 0.; //total eclipse of the star else if(d <= 1. - p) f_array[i] = 1. - p*p; //planet is fully in transit else //planet is crossing the limb { double kap1=acos(fmin((1. - p*p + d*d)/2./d, 1.)); double kap0=acos(fmin((p*p + d*d - 1.)/2./p/d, 1.)); f_array[i] = 1. - (p*p*kap0 + kap1 - 0.5*sqrt(fmax(4.*d*d - pow(1. + d*d - p*p, 2.), 0.)))/M_PI; } } return PyArray_Return((PyArrayObject *)flux); } static char _uniform_ld_doc[] = "This extension module returns a limb darkened light curve for a uniform stellar intensity profile."; static PyMethodDef _uniform_ld_methods[] = { {"_uniform_ld", _uniform_ld, METH_VARARGS, _uniform_ld_doc},{NULL}}; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef _uniform_ld_module = { PyModuleDef_HEAD_INIT, "_uniform_ld", _uniform_ld_doc, -1, _uniform_ld_methods }; PyMODINIT_FUNC PyInit__uniform_ld(void) { PyObject* module = PyModule_Create(&_uniform_ld_module); if(!module) { return NULL; } import_array(); return module; } #else void init_uniform_ld(void) { Py_InitModule("_uniform_ld", _uniform_ld_methods); import_array(); } #endif
convolutionbnrelu_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void convbnrelu1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 4u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i/8); for (int q=0; q<inch; q++) { #if __ARM_NEON #if __aarch64__ vst1q_f32(tmpptr, vld1q_f32(img0)); vst1q_f32(tmpptr+4, vld1q_f32(img0+4)); tmpptr += 8; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i/8 + (i%8)/4); for (int q=0; q<inch; q++) { #if __ARM_NEON #if __aarch64__ vst1q_f32(tmpptr, vld1q_f32(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p+1); float* outptr2 = top_blob.channel(p+2); float* outptr3 = top_blob.channel(p+3); float* outptr4 = top_blob.channel(p+4); float* outptr5 = top_blob.channel(p+5); float* 
outptr6 = top_blob.channel(p+6); float* outptr7 = top_blob.channel(p+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p : zeros; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla 
v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla 
v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, 
[%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), 
"8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const float* kptr = kernel.channel(p/8); asm volatile( "ld1 {v24.4s, v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "st1 {v24.s}[0],[%0], #4 \n" "st1 {v24.s}[1],[%1], #4 \n" "st1 {v24.s}[2],[%2], #4 \n" "st1 {v24.s}[3],[%3], #4 \n" "st1 
{v25.s}[0],[%4], #4 \n" "st1 {v25.s}[1],[%5], #4 \n" "st1 {v25.s}[2],[%6], #4 \n" "st1 {v25.s}[3],[%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p+1); float* outptr2 = top_blob.channel(p+2); float* outptr3 = top_blob.channel(p+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, 
#3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" "vst1.f32 {d24-d27}, [%2 :128]! \n" "vst1.f32 {d28-d31}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum0_4 = biasptr[0]; float sum0_5 = biasptr[0]; float sum0_6 = biasptr[0]; float sum0_7 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum1_4 = biasptr[1]; float sum1_5 = biasptr[1]; float sum1_6 = biasptr[1]; float sum1_7 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum2_4 = biasptr[2]; float sum2_5 = biasptr[2]; float sum2_6 = biasptr[2]; float sum2_7 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; float sum3_4 = biasptr[3]; float sum3_5 = biasptr[3]; float sum3_6 = biasptr[3]; float sum3_7 = biasptr[3]; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * 
kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, 
#1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4); #else const float* kptr = kernel.channel(p/4); #endif // __ARM_NEON 
&& __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v4.4s}, [%4], #4 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v12.s}[0], [%0], #4 \n" "st1 {v12.s}[1], [%1], #4 \n" "st1 {v12.s}[2], [%2], #4 \n" "st1 {v12.s}[3], [%3], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12" ); #else // __aarch64__ asm volatile( "vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d24[0]}, [%0]! \n" "vst1.f32 {d24[1]}, [%1]! \n" "vst1.f32 {d25[0]}, [%2]! \n" "vst1.f32 {d25[1]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float* outptr0 = out0; int i = 0; for (; i+7<size; i+=8) { const float* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; float sum4 = bias0; float sum5 = bias0; float sum6 = bias0; float sum7 = bias0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, 
#0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1], #16 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8" ); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const float* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q+3<inch; q+=4) { float32x4_t _p0 = vld1q_f32(tmpptr); tmpptr += 4; float32x4_t _k0 = vld1q_f32(kptr); kptr += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? 
bias[p] : 0.f; // // float* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const float* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const float* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } //////////////////BN RELU/////////////////////////// { int size = top_blob.w * top_blob.h; const float *a_data_ptr = a_data; const float *b_data_ptr = b_data; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < top_blob.c; q++) { { float *ptr = top_blob.channel(q); float a = a_data_ptr[q]; float b = b_data_ptr[q]; #if __ARM_NEON int nn = size >> 2; int remain = size - (nn << 2); #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "dup v1.4s, %w4 \n" "dup v2.4s, %w5 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "orr v3.16b, v1.16b, v1.16b \n" "fmla v3.4s, v0.4s, v2.4s \n" "subs %w0, %w0, #1 \n" "st1 {v3.4s}, [%1], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "vdup.f32 q1, %4 \n" "vdup.f32 q2, %5 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vorr.32 q3, q1, q1 \n" "vmla.f32 q3, q0, q2 \n" "subs %0, #1 \n" "vst1.f32 {d6-d7}, [%1 :128]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON ptr = top_blob.channel(q); #if __ARM_NEON nn = size >> 2; remain = size - (nn << 2); #else remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ float32x4_t _zero = vdupq_n_f32(0.f); for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(ptr); _p = vmaxq_f32(_p, _zero); vst1q_f32(ptr, _p); ptr += 4; } #else if (nn > 0) { asm volatile( "veor q1, q0, q0 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vmax.f32 q0, q0, q1 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr) : "cc", "memory", "q0", "q1" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { *ptr = b * *ptr + a; *ptr = std::max(*ptr, 0.f); ptr++; } } } } } static void convbnrelu1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? bias[p+3] : 0.f; const float bias4 = bias ? 
bias[p+4] : 0.f; const float bias5 = bias ? bias[p+5] : 0.f; const float bias6 = bias ? bias[p+6] : 0.f; const float bias7 = bias ? bias[p+7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); int q = 0; for (; q+7<inch; q+=8) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* img4 = bottom_blob.channel(q+4); const float* img5 = bottom_blob.channel(q+5); const float* img6 = bottom_blob.channel(q+6); const float* img7 = bottom_blob.channel(q+7); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* kernel6 = kernel + (p+6)*inch + q; const float* kernel7 = kernel + (p+7)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; const float* r4 = img4; const float* r5 = img5; const float* r6 = img6; const float* r7 = img7; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); float32x4_t _k6 = vld1q_f32(kernel6); float32x4_t _k7 = vld1q_f32(kernel7); float32x4_t _k0n = vld1q_f32(kernel0+4); float32x4_t _k1n = vld1q_f32(kernel1+4); float32x4_t _k2n = vld1q_f32(kernel2+4); float32x4_t _k3n = vld1q_f32(kernel3+4); float32x4_t 
_k4n = vld1q_f32(kernel4+4); float32x4_t _k5n = vld1q_f32(kernel5+4); float32x4_t _k6n = vld1q_f32(kernel6+4); float32x4_t _k7n = vld1q_f32(kernel7+4); #ifdef __clang__ // gcc reject over 30 oprands :( if (nn > 0) { asm volatile( "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "0: \n" "fmla v18.4s, v17.4s, %34.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v19.4s, v17.4s, %35.s[0] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v21.4s}, [%4] \n" "fmla v20.4s, v17.4s, %36.s[0] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v22.4s}, [%5] \n" "fmla v21.4s, v17.4s, %37.s[0] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v23.4s}, [%6] \n" "fmla v22.4s, v17.4s, %38.s[0] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v16.4s}, [%10], #16 \n" "fmla v23.4s, v17.4s, %39.s[0] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v24.4s}, [%7] \n" "fmla v18.4s, v16.4s, %34.s[1] \n" "fmla v19.4s, v16.4s, %35.s[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v25.4s}, [%8] \n" "fmla v24.4s, v17.4s, %40.s[0] \n" "fmla v25.4s, v17.4s, %41.s[0] \n" "fmla v20.4s, v16.4s, %36.s[1] \n" "fmla v21.4s, v16.4s, %37.s[1] \n" "prfm pldl1keep, [%11, #128] \n" "ld1 {v17.4s}, [%11], #16 \n" "fmla v22.4s, v16.4s, %38.s[1] \n" "fmla v23.4s, v16.4s, %39.s[1] \n" "fmla v18.4s, v17.4s, %34.s[2] \n" "fmla v19.4s, v17.4s, %35.s[2] \n" "fmla v24.4s, v16.4s, %40.s[1] \n" "fmla v25.4s, v16.4s, %41.s[1] \n" "fmla v20.4s, v17.4s, %36.s[2] \n" "fmla v21.4s, v17.4s, %37.s[2] \n" "prfm pldl1keep, [%12, #128] \n" "ld1 {v16.4s}, [%12], #16 \n" "fmla v22.4s, v17.4s, %38.s[2] \n" "fmla v23.4s, v17.4s, %39.s[2] \n" "fmla v18.4s, v16.4s, %34.s[3] \n" "fmla v19.4s, v16.4s, %35.s[3] \n" "fmla v24.4s, v17.4s, %40.s[2] \n" "fmla v25.4s, v17.4s, %41.s[2] \n" "fmla v20.4s, v16.4s, %36.s[3] \n" "fmla v21.4s, v16.4s, %37.s[3] \n" "prfm pldl1keep, [%13, #128] \n" "ld1 {v17.4s}, [%13], #16 \n" "fmla v22.4s, 
v16.4s, %38.s[3] \n" "fmla v23.4s, v16.4s, %39.s[3] \n" "fmla v18.4s, v17.4s, %42.s[0] \n" "fmla v19.4s, v17.4s, %43.s[0] \n" "fmla v24.4s, v16.4s, %40.s[3] \n" "fmla v25.4s, v16.4s, %41.s[3] \n" "fmla v20.4s, v17.4s, %44.s[0] \n" "fmla v21.4s, v17.4s, %45.s[0] \n" "prfm pldl1keep, [%14, #128] \n" "ld1 {v16.4s}, [%14], #16 \n" "fmla v22.4s, v17.4s, %46.s[0] \n" "fmla v23.4s, v17.4s, %47.s[0] \n" "fmla v18.4s, v16.4s, %42.s[1] \n" "fmla v19.4s, v16.4s, %43.s[1] \n" "fmla v24.4s, v17.4s, %48.s[0] \n" "fmla v25.4s, v17.4s, %49.s[0] \n" "fmla v20.4s, v16.4s, %44.s[1] \n" "fmla v21.4s, v16.4s, %45.s[1] \n" "prfm pldl1keep, [%15, #128] \n" "ld1 {v17.4s}, [%15], #16 \n" "fmla v22.4s, v16.4s, %46.s[1] \n" "fmla v23.4s, v16.4s, %47.s[1] \n" "fmla v18.4s, v17.4s, %42.s[2] \n" "fmla v19.4s, v17.4s, %43.s[2] \n" "fmla v24.4s, v16.4s, %48.s[1] \n" "fmla v25.4s, v16.4s, %49.s[1] \n" "fmla v20.4s, v17.4s, %44.s[2] \n" "fmla v21.4s, v17.4s, %45.s[2] \n" "prfm pldl1keep, [%16, #128] \n" "ld1 {v16.4s}, [%16], #16 \n" "fmla v22.4s, v17.4s, %46.s[2] \n" "fmla v23.4s, v17.4s, %47.s[2] \n" "fmla v18.4s, v16.4s, %42.s[3] \n" "fmla v19.4s, v16.4s, %43.s[3] \n" "fmla v24.4s, v17.4s, %48.s[2] \n" "fmla v25.4s, v17.4s, %49.s[2] \n" "fmla v20.4s, v16.4s, %44.s[3] \n" "fmla v21.4s, v16.4s, %45.s[3] \n" "st1 {v18.4s}, [%1], #16 \n" "fmla v22.4s, v16.4s, %46.s[3] \n" "st1 {v19.4s}, [%2], #16 \n" "fmla v23.4s, v16.4s, %47.s[3] \n" "st1 {v20.4s}, [%3], #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v17.4s}, [%9], #16 \n" "fmla v24.4s, v16.4s, %48.s[3] \n" "st1 {v21.4s}, [%4], #16 \n" "fmla v25.4s, v16.4s, %49.s[3] \n" "st1 {v22.4s}, [%5], #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "st1 {v23.4s}, [%6], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "st1 {v24.4s}, [%7], #16 \n" "subs %w0, %w0, #1 \n" "st1 {v25.4s}, [%8], #16 \n" "bne 0b \n" "sub %9, %9, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 
"=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(outptr6),// %7 "=r"(outptr7),// %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(r3), // %12 "=r"(r4), // %13 "=r"(r5), // %14 "=r"(r6), // %15 "=r"(r7) // %16 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(r3), "13"(r4), "14"(r5), "15"(r6), "16"(r7), "w"(_k0), // %34 "w"(_k1), // %35 "w"(_k2), // %36 "w"(_k3), // %37 "w"(_k4), // %38 "w"(_k5), // %39 "w"(_k6), // %40 "w"(_k7), // %41 "w"(_k0n), // %42 "w"(_k1n), // %43 "w"(_k2n), // %44 "w"(_k3n), // %45 "w"(_k4n), // %46 "w"(_k5n), // %47 "w"(_k6n), // %48 "w"(_k7n) // %49 : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"//, "v26", "v27", "v28", "v29", "v30", "v31" ); } #else for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t _out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_laneq_f32(_out0p, _p, _k0, 0); _out1p = vfmaq_laneq_f32(_out1p, _p, _k1, 0); _out2p = vfmaq_laneq_f32(_out2p, _p, _k2, 0); _out3p = vfmaq_laneq_f32(_out3p, _p, _k3, 0); _out4p = vfmaq_laneq_f32(_out4p, _p, _k4, 0); _out5p = vfmaq_laneq_f32(_out5p, _p, _k5, 0); _out6p = vfmaq_laneq_f32(_out6p, _p, _k6, 0); _out7p = vfmaq_laneq_f32(_out7p, _p, _k7, 0); float32x4_t _p1 = vld1q_f32(r1); _out0p = vfmaq_laneq_f32(_out0p, _p1, _k0, 1); _out1p = vfmaq_laneq_f32(_out1p, _p1, _k1, 1); _out2p = vfmaq_laneq_f32(_out2p, _p1, _k2, 1); _out3p = vfmaq_laneq_f32(_out3p, _p1, _k3, 1); _out4p = vfmaq_laneq_f32(_out4p, _p1, _k4, 1); _out5p = vfmaq_laneq_f32(_out5p, _p1, _k5, 1); _out6p = vfmaq_laneq_f32(_out6p, _p1, _k6, 1); _out7p = vfmaq_laneq_f32(_out7p, _p1, _k7, 1); 
float32x4_t _p2 = vld1q_f32(r2); _out0p = vfmaq_laneq_f32(_out0p, _p2, _k0, 2); _out1p = vfmaq_laneq_f32(_out1p, _p2, _k1, 2); _out2p = vfmaq_laneq_f32(_out2p, _p2, _k2, 2); _out3p = vfmaq_laneq_f32(_out3p, _p2, _k3, 2); _out4p = vfmaq_laneq_f32(_out4p, _p2, _k4, 2); _out5p = vfmaq_laneq_f32(_out5p, _p2, _k5, 2); _out6p = vfmaq_laneq_f32(_out6p, _p2, _k6, 2); _out7p = vfmaq_laneq_f32(_out7p, _p2, _k7, 2); float32x4_t _p3 = vld1q_f32(r3); _out0p = vfmaq_laneq_f32(_out0p, _p3, _k0, 3); _out1p = vfmaq_laneq_f32(_out1p, _p3, _k1, 3); _out2p = vfmaq_laneq_f32(_out2p, _p3, _k2, 3); _out3p = vfmaq_laneq_f32(_out3p, _p3, _k3, 3); _out4p = vfmaq_laneq_f32(_out4p, _p3, _k4, 3); _out5p = vfmaq_laneq_f32(_out5p, _p3, _k5, 3); _out6p = vfmaq_laneq_f32(_out6p, _p3, _k6, 3); _out7p = vfmaq_laneq_f32(_out7p, _p3, _k7, 3); float32x4_t _p4 = vld1q_f32(r4); _out0p = vfmaq_laneq_f32(_out0p, _p4, _k0n, 0); _out1p = vfmaq_laneq_f32(_out1p, _p4, _k1n, 0); _out2p = vfmaq_laneq_f32(_out2p, _p4, _k2n, 0); _out3p = vfmaq_laneq_f32(_out3p, _p4, _k3n, 0); _out4p = vfmaq_laneq_f32(_out4p, _p4, _k4n, 0); _out5p = vfmaq_laneq_f32(_out5p, _p4, _k5n, 0); _out6p = vfmaq_laneq_f32(_out6p, _p4, _k6n, 0); _out7p = vfmaq_laneq_f32(_out7p, _p4, _k7n, 0); float32x4_t _p5 = vld1q_f32(r5); _out0p = vfmaq_laneq_f32(_out0p, _p5, _k0n, 1); _out1p = vfmaq_laneq_f32(_out1p, _p5, _k1n, 1); _out2p = vfmaq_laneq_f32(_out2p, _p5, _k2n, 1); _out3p = vfmaq_laneq_f32(_out3p, _p5, _k3n, 1); _out4p = vfmaq_laneq_f32(_out4p, _p5, _k4n, 1); _out5p = vfmaq_laneq_f32(_out5p, _p5, _k5n, 1); _out6p = vfmaq_laneq_f32(_out6p, _p5, _k6n, 1); _out7p = vfmaq_laneq_f32(_out7p, _p5, _k7n, 1); float32x4_t _p6 = vld1q_f32(r6); _out0p = vfmaq_laneq_f32(_out0p, _p6, _k0n, 2); _out1p = vfmaq_laneq_f32(_out1p, _p6, _k1n, 2); _out2p = vfmaq_laneq_f32(_out2p, _p6, _k2n, 2); _out3p = vfmaq_laneq_f32(_out3p, _p6, _k3n, 2); _out4p = vfmaq_laneq_f32(_out4p, _p6, _k4n, 2); _out5p = vfmaq_laneq_f32(_out5p, _p6, _k5n, 2); _out6p = 
vfmaq_laneq_f32(_out6p, _p6, _k6n, 2); _out7p = vfmaq_laneq_f32(_out7p, _p6, _k7n, 2); float32x4_t _p7 = vld1q_f32(r7); _out0p = vfmaq_laneq_f32(_out0p, _p7, _k0n, 3); _out1p = vfmaq_laneq_f32(_out1p, _p7, _k1n, 3); _out2p = vfmaq_laneq_f32(_out2p, _p7, _k2n, 3); _out3p = vfmaq_laneq_f32(_out3p, _p7, _k3n, 3); _out4p = vfmaq_laneq_f32(_out4p, _p7, _k4n, 3); _out5p = vfmaq_laneq_f32(_out5p, _p7, _k5n, 3); _out6p = vfmaq_laneq_f32(_out6p, _p7, _k6n, 3); _out7p = vfmaq_laneq_f32(_out7p, _p7, _k7n, 3); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; r6 += 4; r7 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } #endif for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3] + *r4 * kernel0[4] + *r5 * kernel0[5] + *r6 * kernel0[6] + *r7 * kernel0[7]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3] + *r4 * kernel1[4] + *r5 * kernel1[5] + *r6 * kernel1[6] + *r7 * kernel1[7]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3] + *r4 * kernel2[4] + *r5 * kernel2[5] + *r6 * kernel2[6] + *r7 * kernel2[7]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3] + *r4 * kernel3[4] + *r5 * kernel3[5] + *r6 * kernel3[6] + *r7 * kernel3[7]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3] + *r4 * kernel4[4] + *r5 * kernel4[5] + *r6 * kernel4[6] + *r7 * kernel4[7]; float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3] + *r4 * kernel5[4] + *r5 * kernel5[5] + *r6 * kernel5[6] + *r7 * kernel5[7]; float sum6 = *r0 * kernel6[0] + *r1 * 
kernel6[1] + *r2 * kernel6[2] + *r3 * kernel6[3] + *r4 * kernel6[4] + *r5 * kernel6[5] + *r6 * kernel6[6] + *r7 * kernel6[7]; float sum7 = *r0 * kernel7[0] + *r1 * kernel7[1] + *r2 * kernel7[2] + *r3 * kernel7[3] + *r4 * kernel7[4] + *r5 * kernel7[5] + *r6 * kernel7[6] + *r7 * kernel7[7]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; r1++; r2++; r3++; r4++; r5++; r6++; r7++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* kernel6 = kernel + (p+6)*inch + q; const float* kernel7 = kernel + (p+7)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float k4 = kernel4[0]; const float k5 = kernel5[0]; const float k6 = kernel6[0]; const float k7 = kernel7[0]; const float* r0 = img0; int size = outw * outh; int nn = size >> 2; int remain = size & 3; float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); float32x4_t _k6 = vdupq_n_f32(k6); float32x4_t _k7 = vdupq_n_f32(k7); for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _out0p = vld1q_f32(outptr0); float32x4_t _out1p = vld1q_f32(outptr1); float32x4_t _out2p = vld1q_f32(outptr2); float32x4_t 
_out3p = vld1q_f32(outptr3); float32x4_t _out4p = vld1q_f32(outptr4); float32x4_t _out5p = vld1q_f32(outptr5); float32x4_t _out6p = vld1q_f32(outptr6); float32x4_t _out7p = vld1q_f32(outptr7); _out0p = vfmaq_f32(_out0p, _p, _k0); _out1p = vfmaq_f32(_out1p, _p, _k1); _out2p = vfmaq_f32(_out2p, _p, _k2); _out3p = vfmaq_f32(_out3p, _p, _k3); _out4p = vfmaq_f32(_out4p, _p, _k4); _out5p = vfmaq_f32(_out5p, _p, _k5); _out6p = vfmaq_f32(_out6p, _p, _k6); _out7p = vfmaq_f32(_out7p, _p, _k7); vst1q_f32(outptr0, _out0p); vst1q_f32(outptr1, _out1p); vst1q_f32(outptr2, _out2p); vst1q_f32(outptr3, _out3p); vst1q_f32(outptr4, _out4p); vst1q_f32(outptr5, _out5p); vst1q_f32(outptr6, _out6p); vst1q_f32(outptr7, _out7p); r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; } for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; float sum6 = *r0 * k6; float sum7 = *r0 * k7; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; } } } #else nn_outch = outch / 6; remain_outch_start = nn_outch * 6; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 6; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? bias[p+3] : 0.f; const float bias4 = bias ? bias[p+4] : 0.f; const float bias5 = bias ? 
bias[p+5] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); float32x4_t _k4 = vld1q_f32(kernel4); float32x4_t _k5 = vld1q_f32(kernel5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "vmla.f32 q6, q12, %e22[0] \n" "0: \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n"// q8 = outptr2 "vmla.f32 q7, q12, %e23[0] \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n"// q9 = outptr3 "vmla.f32 q8, q12, %e24[0] \n" "pld [%8, #128] \n" "vld1.f32 {d26-d27}, [%8 :128]! 
\n"// q13 = r1 "vmla.f32 q9, q12, %e25[0] \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n"// q10 = outptr4 "vmla.f32 q6, q13, %e22[1] \n" "vmla.f32 q7, q13, %e23[1] \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n"// q11 = outptr5 "vmla.f32 q10, q12, %e26[0] \n" "vmla.f32 q11, q12, %e27[0] \n" "vmla.f32 q8, q13, %e24[1] \n" "vmla.f32 q9, q13, %e25[1] \n" "pld [%9, #128] \n" "vld1.f32 {d28-d29}, [%9 :128]! \n"// q14 = r2 "vmla.f32 q10, q13, %e26[1] \n" "vmla.f32 q11, q13, %e27[1] \n" "vmla.f32 q6, q14, %f22[0] \n" "vmla.f32 q7, q14, %f23[0] \n" "vmla.f32 q8, q14, %f24[0] \n" "vmla.f32 q9, q14, %f25[0] \n" "pld [%10, #128] \n" "vld1.f32 {d30-d31}, [%10 :128]! \n"// q15 = r3 "vmla.f32 q10, q14, %f26[0] \n" "vmla.f32 q11, q14, %f27[0] \n" "vmla.f32 q6, q15, %f22[1] \n" "vmla.f32 q7, q15, %f23[1] \n" "vmla.f32 q8, q15, %f24[1] \n" "vmla.f32 q9, q15, %f25[1] \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "vmla.f32 q10, q15, %f26[1] \n" "vmla.f32 q11, q15, %f27[1] \n" "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "vmla.f32 q6, q12, %e22[0] \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(r0), // %7 "=r"(r1), // %8 "=r"(r2), // %9 "=r"(r3) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "8"(r1), "9"(r2), "10"(r3), "w"(_k0), // %22 "w"(_k1), // %23 "w"(_k2), // %24 "w"(_k3), // %25 "w"(_k4), // %26 "w"(_k5) // %27 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; float sum4 = *r0 * kernel4[0] + *r1 * kernel4[1] + *r2 * kernel4[2] + *r3 * kernel4[3]; float sum5 = *r0 * kernel5[0] + *r1 * kernel5[1] + *r2 * kernel5[2] + *r3 * kernel5[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* kernel4 = kernel + (p+4)*inch + q; const float* kernel5 = kernel + (p+5)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float k4 = kernel4[0]; const 
float k5 = kernel5[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 2; int remain = size & 3; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); float32x4_t _k4 = vdupq_n_f32(k4); float32x4_t _k5 = vdupq_n_f32(k5); if (nn > 0) { asm volatile( "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "0: \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :128] \n"// q7 = outptr1 "vmla.f32 q6, q12, %q16 \n" "pld [%3, #128] \n" "vld1.f32 {d16-d17}, [%3 :128] \n"// q8 = outptr2 "vmla.f32 q7, q12, %q17 \n" "pld [%4, #128] \n" "vld1.f32 {d18-d19}, [%4 :128] \n"// q9 = outptr3 "vmla.f32 q8, q12, %q18 \n" "pld [%5, #128] \n" "vld1.f32 {d20-d21}, [%5 :128] \n"// q10 = outptr4 "vmla.f32 q9, q12, %q19 \n" "pld [%6, #128] \n" "vld1.f32 {d22-d23}, [%6 :128] \n"// q11 = outptr5 "vmla.f32 q10, q12, %q20 \n" "vmla.f32 q11, q12, %q21 \n" "pld [%7, #128] \n" "vld1.f32 {d24-d25}, [%7 :128]! \n"// q12 = r0 "vst1.f32 {d12-d13}, [%1 :128]! \n" "vst1.f32 {d14-d15}, [%2 :128]! \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :128] \n"// q6 = outptr0 "vst1.f32 {d16-d17}, [%3 :128]! \n" "vst1.f32 {d18-d19}, [%4 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d20-d21}, [%5 :128]! \n" "vst1.f32 {d22-d23}, [%6 :128]! 
\n" "bne 0b \n" "sub %7, #16 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(outptr4),// %5 "=r"(outptr5),// %6 "=r"(r0) // %7 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(r0), "w"(_k0), // %16 "w"(_k1), // %17 "w"(_k2), // %18 "w"(_k3), // %19 "w"(_k4), // %20 "w"(_k5) // %21 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); } #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; float sum4 = *r0 * k4; float sum5 = *r0 * k5; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; r0++; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; } } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? 
bias[p+3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "0: \n" "fmla v8.4s, v6.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v9.4s, v7.4s, %18.s[0] \n" "fmla v10.4s, v6.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v11.4s, v7.4s, %19.s[0] \n" "fmla v12.4s, v6.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v13.4s, v7.4s, %20.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n" "fmla v14.4s, v6.4s, %21.s[0] \n" "fmla v15.4s, v7.4s, %21.s[0] \n" "fmla v8.4s, v4.4s, %18.s[1] \n" "fmla v9.4s, v5.4s, %18.s[1] \n" "fmla v10.4s, v4.4s, %19.s[1] \n" "fmla v11.4s, v5.4s, %19.s[1] \n" "fmla v12.4s, v4.4s, %20.s[1] \n" "fmla v13.4s, v5.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v6.4s, v7.4s}, [%7], #32 \n" "fmla v14.4s, v4.4s, %21.s[1] \n" "fmla 
v15.4s, v5.4s, %21.s[1] \n" "fmla v8.4s, v6.4s, %18.s[2] \n" "fmla v9.4s, v7.4s, %18.s[2] \n" "fmla v10.4s, v6.4s, %19.s[2] \n" "fmla v11.4s, v7.4s, %19.s[2] \n" "fmla v12.4s, v6.4s, %20.s[2] \n" "fmla v13.4s, v7.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v4.4s, v5.4s}, [%8], #32 \n" "fmla v14.4s, v6.4s, %21.s[2] \n" "fmla v15.4s, v7.4s, %21.s[2] \n" "fmla v8.4s, v4.4s, %18.s[3] \n" "fmla v9.4s, v5.4s, %18.s[3] \n" "fmla v10.4s, v4.4s, %19.s[3] \n" "fmla v11.4s, v5.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %20.s[3] \n" "fmla v13.4s, v5.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "fmla v14.4s, v4.4s, %21.s[3] \n" "fmla v15.4s, v5.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "0: \n" "vmla.f32 q8, q6, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q9, q7, %e18[0] \n" "vmla.f32 q10, q6, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q11, q7, %e19[0] \n" "vmla.f32 q12, q6, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q13, q7, %e20[0] \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! 
\n" "vmla.f32 q14, q6, %e21[0] \n" "vmla.f32 q15, q7, %e21[0] \n" "vmla.f32 q8, q4, %e18[1] \n" "vmla.f32 q9, q5, %e18[1] \n" "vmla.f32 q10, q4, %e19[1] \n" "vmla.f32 q11, q5, %e19[1] \n" "vmla.f32 q12, q4, %e20[1] \n" "vmla.f32 q13, q5, %e20[1] \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]! \n" "vmla.f32 q14, q4, %e21[1] \n" "vmla.f32 q15, q5, %e21[1] \n" "vmla.f32 q8, q6, %f18[0] \n" "vmla.f32 q9, q7, %f18[0] \n" "vmla.f32 q10, q6, %f19[0] \n" "vmla.f32 q11, q7, %f19[0] \n" "vmla.f32 q12, q6, %f20[0] \n" "vmla.f32 q13, q7, %f20[0] \n" "pld [%8, #256] \n" "vld1.f32 {d8-d11}, [%8 :128]! \n" "vmla.f32 q14, q6, %f21[0] \n" "vmla.f32 q15, q7, %f21[0] \n" "vmla.f32 q8, q4, %f18[1] \n" "vmla.f32 q9, q5, %f18[1] \n" "vmla.f32 q10, q4, %f19[1] \n" "vmla.f32 q11, q5, %f19[1] \n" "vmla.f32 q12, q4, %f20[1] \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vmla.f32 q13, q5, %f20[1] \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "vmla.f32 q14, q4, %f21[1] \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "vmla.f32 q15, q5, %f21[1] \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr2++; outptr3++; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] 
\n" "fmla v8.4s, v6.4s, %12.4s \n" "fmla v9.4s, v7.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v6.4s, %13.4s \n" "fmla v11.4s, v7.4s, %13.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v6.4s, %14.4s \n" "fmla v13.4s, v7.4s, %14.4s \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "fmla v14.4s, v6.4s, %15.4s \n" "fmla v15.4s, v7.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" "sub %5, %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n" "vmla.f32 q8, q6, %q12 \n" "vmla.f32 q9, q7, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n" "vmla.f32 q10, q6, %q13 \n" "vmla.f32 q11, q7, %q13 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128] \n" "vmla.f32 q12, q6, %q14 \n" "vmla.f32 q13, q7, %q14 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128] \n" "vmla.f32 q14, q6, %q15 \n" "vmla.f32 q15, q7, %q15 \n" "vst1.f32 {d24-d27}, [%3 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d12-d15}, [%5 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4 :128]! 
\n" "bne 0b \n" "sub %5, #32 \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0++; outptr0++; outptr1++; outptr2++; outptr3++; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v3.4s, %12.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v2.4s, v3.4s}, [%3], 
#32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v3.4s, %13.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v3.4s, %14.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v3.4s, %15.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v3.4s, %6.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.4s, v3.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } //////////////////BN RELU/////////////////////////// { int size = top_blob.w * top_blob.h; const float *a_data_ptr = a_data; const float *b_data_ptr = b_data; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < top_blob.c; q++) { { float *ptr = top_blob.channel(q); float a = a_data_ptr[q]; float b = b_data_ptr[q]; #if __ARM_NEON int nn = size >> 2; int remain = size - (nn << 2); #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "dup v1.4s, %w4 \n" "dup v2.4s, %w5 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "orr v3.16b, v1.16b, v1.16b \n" "fmla v3.4s, v0.4s, v2.4s \n" "subs %w0, %w0, #1 \n" "st1 {v3.4s}, [%1], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "vdup.f32 q1, %4 \n" "vdup.f32 q2, %5 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vorr.32 q3, q1, q1 \n" "vmla.f32 q3, q0, q2 \n" "subs %0, #1 \n" "vst1.f32 {d6-d7}, [%1 :128]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON ptr = top_blob.channel(q); #if __ARM_NEON nn = size >> 2; remain = size - (nn << 2); #else remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ float32x4_t _zero = vdupq_n_f32(0.f); for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(ptr); _p = vmaxq_f32(_p, _zero); vst1q_f32(ptr, _p); ptr += 4; } #else if (nn > 0) { asm volatile( "veor q1, q0, q0 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vmax.f32 q0, q0, q1 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr) : "cc", "memory", "q0", "q1" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { *ptr = b * *ptr + a; *ptr = std::max(*ptr, 0.f); ptr++; } } } } } static void convbnrelu1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p+1] : 0.f; const float bias2 = bias ? bias[p+2] : 0.f; const float bias3 = bias ? 
bias[p+3] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); int q = 0; for (; q+3<inch; q+=4) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vld1q_f32(kernel0); float32x4_t _k1 = vld1q_f32(kernel1); float32x4_t _k2 = vld1q_f32(kernel2); float32x4_t _k3 = vld1q_f32(kernel3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n"// v4 v5 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %18.s[0] \n" "fmla v9.4s, v5.4s, %18.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %19.s[0] \n" "fmla v11.4s, v5.4s, %19.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "fmla v12.4s, v4.4s, %20.s[0] \n" "fmla v13.4s, v5.4s, %20.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "prfm pldl1keep, [%6, #512] \n" "ld2 {v6.4s, v7.4s}, [%6], #32 \n" "fmla v14.4s, v4.4s, %21.s[0] \n" "fmla v15.4s, v5.4s, %21.s[0] \n" "ld2 {v4.4s, v5.4s}, [%6], #32 \n" "and v7.16b, v4.16b, v4.16b \n"// v6 v7 "fmla v8.4s, v6.4s, %18.s[1] \n" "fmla v9.4s, v7.4s, %18.s[1] \n" "fmla v10.4s, v6.4s, %19.s[1] \n" "fmla v11.4s, v7.4s, %19.s[1] \n" 
"fmla v12.4s, v6.4s, %20.s[1] \n" "fmla v13.4s, v7.4s, %20.s[1] \n" "prfm pldl1keep, [%7, #512] \n" "ld2 {v4.4s, v5.4s}, [%7], #32 \n" "fmla v14.4s, v6.4s, %21.s[1] \n" "fmla v15.4s, v7.4s, %21.s[1] \n" "ld2 {v6.4s, v7.4s}, [%7], #32 \n" "and v5.16b, v6.16b, v6.16b \n"// v4 v5 "fmla v8.4s, v4.4s, %18.s[2] \n" "fmla v9.4s, v5.4s, %18.s[2] \n" "fmla v10.4s, v4.4s, %19.s[2] \n" "fmla v11.4s, v5.4s, %19.s[2] \n" "fmla v12.4s, v4.4s, %20.s[2] \n" "fmla v13.4s, v5.4s, %20.s[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld2 {v6.4s, v7.4s}, [%8], #32 \n" "fmla v14.4s, v4.4s, %21.s[2] \n" "fmla v15.4s, v5.4s, %21.s[2] \n" "ld2 {v4.4s, v5.4s}, [%8], #32 \n" "and v7.16b, v4.16b, v4.16b \n"// v6 v7 "fmla v8.4s, v6.4s, %18.s[3] \n" "fmla v9.4s, v7.4s, %18.s[3] \n" "fmla v10.4s, v6.4s, %19.s[3] \n" "fmla v11.4s, v7.4s, %19.s[3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v6.4s, %20.s[3] \n" "fmla v13.4s, v7.4s, %20.s[3] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v6.4s, %21.s[3] \n" "fmla v15.4s, v7.4s, %21.s[3] \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! 
\n" "vand q5, q6, q6 \n"// q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %e18[0] \n" "vmla.f32 q9, q5, %e18[0] \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %e19[0] \n" "vmla.f32 q11, q5, %e19[0] \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vmla.f32 q12, q4, %e20[0] \n" "vmla.f32 q13, q5, %e20[0] \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "pld [%6, #512] \n" "vld2.f32 {d12-d15}, [%6]! \n" "vmla.f32 q14, q4, %e21[0] \n" "vmla.f32 q15, q5, %e21[0] \n" "vld2.f32 {d8-d11}, [%6]! \n" "vand q7, q4, q4 \n"// q6 q7 "vmla.f32 q8, q6, %e18[1] \n" "vmla.f32 q9, q7, %e18[1] \n" "vmla.f32 q10, q6, %e19[1] \n" "vmla.f32 q11, q7, %e19[1] \n" "vmla.f32 q12, q6, %e20[1] \n" "vmla.f32 q13, q7, %e20[1] \n" "pld [%7, #512] \n" "vld2.f32 {d8-d11}, [%7]! \n" "vmla.f32 q14, q6, %e21[1] \n" "vmla.f32 q15, q7, %e21[1] \n" "vld2.f32 {d12-d15}, [%7]! \n" "vand q5, q6, q6 \n"// q4 q5 "vmla.f32 q8, q4, %f18[0] \n" "vmla.f32 q9, q5, %f18[0] \n" "vmla.f32 q10, q4, %f19[0] \n" "vmla.f32 q11, q5, %f19[0] \n" "vmla.f32 q12, q4, %f20[0] \n" "vmla.f32 q13, q5, %f20[0] \n" "pld [%8, #512] \n" "vld2.f32 {d12-d15}, [%8]! \n" "vmla.f32 q14, q4, %f21[0] \n" "vmla.f32 q15, q5, %f21[0] \n" "vld2.f32 {d8-d11}, [%8]! \n" "vand q7, q4, q4 \n"// q6 q7 "vmla.f32 q8, q6, %f18[1] \n" "vmla.f32 q9, q7, %f18[1] \n" "vmla.f32 q10, q6, %f19[1] \n" "vmla.f32 q11, q7, %f19[1] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q6, %f20[1] \n" "vmla.f32 q13, q7, %f20[1] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q6, %f21[1] \n" "vmla.f32 q15, q7, %f21[1] \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k0), // %18 "w"(_k1), // %19 "w"(_k2), // %20 "w"(_k3) // %21 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3]; float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3]; float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3]; float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3]; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float* kernel1 = kernel + (p+1)*inch + q; const float* kernel2 = kernel + (p+2)*inch + q; const float* kernel3 = kernel + (p+3)*inch + q; const float k0 = kernel0[0]; const float k1 = kernel1[0]; const float k2 = kernel2[0]; const float k3 = kernel3[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { int size = outw; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld2 
{v4.4s, v5.4s}, [%5], #32 \n" "ld2 {v6.4s, v7.4s}, [%5], #32 \n" "and v5.16b, v6.16b, v6.16b \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" "fmla v8.4s, v4.4s, %12.4s \n" "fmla v9.4s, v5.4s, %12.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" "fmla v10.4s, v4.4s, %13.4s \n" "fmla v11.4s, v5.4s, %13.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "fmla v12.4s, v4.4s, %14.4s \n" "fmla v13.4s, v5.4s, %14.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v14.4s, v15.4s}, [%4] \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "fmla v14.4s, v4.4s, %15.4s \n" "fmla v15.4s, v5.4s, %15.4s \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%5, #512] \n" "vld2.f32 {d8-d11}, [%5]! \n" "vld2.f32 {d12-d15}, [%5]! \n" "vand q5, q6, q6 \n"// q4 q5 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1] \n" "vmla.f32 q8, q4, %q12 \n" "vmla.f32 q9, q5, %q12 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" "vmla.f32 q10, q4, %q13 \n" "vmla.f32 q11, q5, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3] \n" "vst1.f32 {d16-d19}, [%1]! \n" "vmla.f32 q12, q4, %q14 \n" "vmla.f32 q13, q5, %q14 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" "vst1.f32 {d20-d23}, [%2]! \n" "vmla.f32 q14, q4, %q15 \n" "vmla.f32 q15, q5, %q15 \n" "vst1.f32 {d24-d27}, [%3]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d31}, [%4]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0),// %1 "=r"(outptr1),// %2 "=r"(outptr2),// %3 "=r"(outptr3),// %4 "=r"(r0) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(r0), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO neon optimize float sum0 = *r0 * k0; float sum1 = *r0 * k1; float sum2 = *r0 * k2; float sum3 = *r0 * k3; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; r0 += 2; outptr0++; outptr1++; outptr2++; outptr3++; } r0 += tailstep; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %12.4s \n" "fmla v1.4s, v8.4s, %12.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld2 
{v2.4s, v3.4s}, [%3], #32 \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %13.4s \n" "fmla v1.4s, v8.4s, %13.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "ld2 {v8.4s, v9.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %14.4s \n" "fmla v1.4s, v8.4s, %14.4s \n" "prfm pldl1keep, [%5, #512] \n" "ld2 {v2.4s, v3.4s}, [%5], #32 \n" "ld2 {v8.4s, v9.4s}, [%5], #32 \n" "fmla v0.4s, v2.4s, %15.4s \n" "fmla v1.4s, v8.4s, %15.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9" ); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1] \n" "fmla v0.4s, v2.4s, %6.4s \n" "fmla v1.4s, v8.4s, %6.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9" ); } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } //////////////////BN RELU/////////////////////////// { int size = top_blob.w * top_blob.h; const float *a_data_ptr = a_data; const float *b_data_ptr = b_data; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < top_blob.c; q++) { { float *ptr = top_blob.channel(q); float a = a_data_ptr[q]; float b = b_data_ptr[q]; #if __ARM_NEON int nn = size >> 2; int remain = size - (nn << 2); #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "dup v1.4s, %w4 \n" "dup v2.4s, %w5 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "orr v3.16b, v1.16b, v1.16b \n" "fmla v3.4s, v0.4s, v2.4s \n" "subs %w0, %w0, #1 \n" "st1 {v3.4s}, [%1], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "v0", "v1", "v2", "v3" ); } #else if (nn > 0) { asm volatile( "vdup.f32 q1, %4 \n" "vdup.f32 q2, %5 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vorr.32 q3, q1, q1 \n" "vmla.f32 q3, q0, q2 \n" "subs %0, #1 \n" "vst1.f32 {d6-d7}, [%1 :128]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr), "r"(a), // %4 "r"(b) // %5 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON ptr = top_blob.channel(q); #if __ARM_NEON nn = size >> 2; remain = size - (nn << 2); #else remain = size; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ float32x4_t _zero = vdupq_n_f32(0.f); for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(ptr); _p = vmaxq_f32(_p, _zero); vst1q_f32(ptr, _p); ptr += 4; } #else if (nn > 0) { asm volatile( "veor q1, q0, q0 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" "vmax.f32 q0, q0, q1 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(ptr) // %1 : "0"(nn), "1"(ptr) : "cc", "memory", "q0", "q1" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { *ptr = b * *ptr + a; *ptr = std::max(*ptr, 0.f); ptr++; } } } } }
sieve1-taskloop.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial-build fallbacks: the code below calls these unconditionally, so
   without them a build where _OPENMP is undefined does not compile. */
static int omp_get_max_threads(void) { return 1; }
static int omp_get_thread_num(void)  { return 0; }
#endif

/* Return the current wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;

/* Sieve of Eratosthenes parallelized with OpenMP taskloops.
 *
 * Counts the primes in [2, lastNumber].  Both the initialization pass and
 * each multiple-marking pass are chopped into tasks with "taskloop".  Every
 * taskloop construct ends with an implicit taskgroup, so the marking pass
 * for one prime i is guaranteed complete before the next i is examined and
 * before the final counting loop runs.
 *
 * Returns the number of primes <= lastNumber, or 0 on allocation failure.
 */
int eratosthenes(int lastNumber) {
    int found = 0;
    int sqrt_lN = (int)sqrt((double)lastNumber);

    /* 1. Create the list of naturals 2..lastNumber, all initially marked
       as potential primes. */
    char *isPrime = (char *)malloc((size_t)(lastNumber + 1) * sizeof(char));
    if (isPrime == NULL) {
        fprintf(stderr, "Error: cannot allocate sieve of %d entries\n",
                lastNumber + 1);
        return 0;
    }

    #pragma omp parallel
    {
        /* One thread generates the tasks; the team executes them. */
        #pragma omp single
        {
            #pragma omp taskloop num_tasks(100)
            for (int i = 0; i <= lastNumber; i++)
                isPrime[i] = 1;

            /* 2. Starting from i = 2, take each unmarked number up to
               sqrt(lastNumber)... */
            for (int i = 2; i <= sqrt_lN; i++) {
                if (isPrime[i]) {
                    /* 3. ...and mark all multiples of i in [i*i, lastNumber].
                       The implicit taskgroup at the end of this taskloop
                       orders the marking for i before the next iteration. */
                    #pragma omp taskloop num_tasks(100)
                    for (int j = i * i; j <= lastNumber; j += i)
                        isPrime[j] = 0;
                }
            }

            /* 4. The unmarked numbers are primes; count them.  This runs
               serially inside the single region, after all marking tasks
               have completed. */
            for (int i = 2; i <= lastNumber; i++)
                found += isPrime[i];
        }
    }

    /* 5. We are done with the isPrime array, free it. */
    free(isPrime);

    return found;
}

/* Print command-line usage help (thread count only applies with OpenMP). */
void usage(void) {
#ifdef _OPENMP
    printf("sieve <range> <thread count>\n");
    printf(" <range> is an integer N - the range is from 2 - N\n");
    printf(" <thread count> is the number of threads to use\n");
#else
    printf("sieve <range>\n");
    printf(" <range> is an integer N - the range is from 2 - N\n");
#endif
}

int main(int argc, char ** argv) {
    /* argv[1]: Upper-bound on primes
       argv[2]: Number of threads to run in parallel (OpenMP builds only) */
#ifdef _OPENMP
    if (argc != 3) {
#else
    if (argc != 2) {
#endif
        printf("Error: Invalid number of arguments\n");
        usage();
        return 0;
    }

    int range_max = atoi(argv[1]);
    printf("Range = %d\n", range_max);
#ifdef _OPENMP
    int num_threads = atoi(argv[2]);
#endif

    if (range_max < 2) {
        printf("Error: <range> Must be an integer greater than or equal to 2\n");
        usage();
        return 0;
    }

#ifdef _OPENMP
    if (num_threads < 1) {
        printf("Error: <thread count> Must be a positive value between 1 and %d\n",
               omp_get_max_threads());
        usage();
        return 0;
    }
    else if (num_threads > omp_get_max_threads()) {
        /* Clamp rather than fail when more threads than available are asked. */
        num_threads = omp_get_max_threads();
    }
    omp_set_num_threads(num_threads);

    /* Make sure we haven't created too many threads: the first thread's
       block must cover at least sqrt(n) for the decomposition to be valid. */
    int temp = (range_max - 1) / num_threads;
    if ((1 + temp) < (int)sqrt((double)range_max)) {
        printf("Error: Too many threads requested!\n");
        printf("    The first thread must have a block size >= sqrt(n)\n");
        exit(1);
    }
#endif

    double stamp;

    /* Solutions count */
    START_COUNT_TIME;
    int count = eratosthenes(range_max);
    STOP_COUNT_TIME;

    /* Print the results. */
    printf("Number of primes smaller than or equal to %d = %d\n",
           range_max, count);
    printf("%0.6f\n", stamp);

    return 0;
}
tree-vect-loop.c
/* Loop Vectorization Copyright (C) 2003-2017 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> and Ira Rosen <irar@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs-tree.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "cfgloop.h" #include "params.h" #include "tree-scalar-evolution.h" #include "tree-vectorizer.h" #include "gimple-fold.h" #include "cgraph.h" #include "tree-cfg.h" #include "tree-if-conv.h" #include "tree-eh.h" /* Loop Vectorization Pass. This pass tries to vectorize loops. 
For example, the vectorizer transforms the following simple loop: short a[N]; short b[N]; short c[N]; int i; for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } as if it was manually vectorized by rewriting the source code into: typedef int __attribute__((mode(V8HI))) v8hi; short a[N]; short b[N]; short c[N]; int i; v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; v8hi va, vb, vc; for (i=0; i<N/8; i++){ vb = pb[i]; vc = pc[i]; va = vb + vc; pa[i] = va; } The main entry to this pass is vectorize_loops(), in which the vectorizer applies a set of analyses on a given set of loops, followed by the actual vectorization transformation for the loops that had successfully passed the analysis phase. Throughout this pass we make a distinction between two types of data: scalars (which are represented by SSA_NAMES), and memory references ("data-refs"). These two types of data require different handling both during analysis and transformation. The types of data-refs that the vectorizer currently supports are ARRAY_REFS which base is an array DECL (not a pointer), and INDIRECT_REFS through pointers; both array and pointer accesses are required to have a simple (consecutive) access pattern. Analysis phase: =============== The driver for the analysis phase is vect_analyze_loop(). It applies a set of analyses, some of which rely on the scalar evolution analyzer (scev) developed by Sebastian Pop. During the analysis phase the vectorizer records some information per stmt in a "stmt_vec_info" struct which is attached to each stmt in the loop, as well as general information about the loop as a whole, which is recorded in a "loop_vec_info" struct attached to each loop. Transformation phase: ===================== The loop transformation phase scans all the stmts in the loop, and creates a vector stmt (or a sequence of stmts) for each scalar stmt S in the loop that needs to be vectorized. 
It inserts the vector code sequence just before the scalar stmt S, and records a pointer to the vector code in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct attached to S). This pointer will be used for the vectorization of following stmts which use the def of stmt S. Stmt S is removed if it writes to memory; otherwise, we rely on dead code elimination for removing it. For example, say stmt S1 was vectorized into stmt VS1: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 S2: a = b; To vectorize stmt S2, the vectorizer first finds the stmt that defines the operand 'b' (S1), and gets the relevant vector def 'vb' from the vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The resulting sequence would be: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 VS2: va = vb; S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 Operands that are not SSA_NAMEs, are data-refs that appear in load/store operations (like 'x[i]' in S1), and are handled differently. Target modeling: ================= Currently the only target specific information that is used is the size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". Targets that can support different sizes of vectors, for now will need to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More flexibility will be added in the future. Since we only vectorize operations which vector form can be expressed using existing tree codes, to verify that an operation is supported, the vectorizer checks the relevant optab at the relevant machine_mode (e.g, optab_handler (add_optab, V8HImode)). If the value found is CODE_FOR_nothing, then there's no target support, and we can't vectorize the stmt. 
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html */

static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);

/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4byte elements,
   on a target with vector size (VS) 16byte, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }

   Returns false (and dumps the reason when dumping is enabled) if any
   relevant stmt has no supported vector type or mask type.  On success the
   chosen VF is stored in LOOP_VINFO_VECT_FACTOR.  */

static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  unsigned int vectorization_factor = 0;
  tree scalar_type = NULL_TREE;
  gphi *phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  unsigned i;
  HOST_WIDE_INT dummy;
  gimple *stmt, *pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;
  bool bool_result;
  /* Stmts producing scalar booleans; their (mask) vector type is decided
     in a second pass after VF is known.  */
  auto_vec<stmt_vec_info> mask_producers;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      /* First pass over this BB: relevant/live PHI results contribute
         their scalar type's vector width to the VF.  */
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          phi = si.phi ();
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info)
              || STMT_VINFO_LIVE_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              /* VF is the maximum nunits over all vector types used.  */
              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      /* Second pass: non-PHI stmts.  The iteration protocol is subtle:
         a stmt replaced by a pattern is analyzed via its pattern stmt
         (ANALYZE_PATTERN_STMT) and any relevant stmts in the pattern's
         def sequence (PATTERN_DEF_SI) before SI is advanced.  */
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
           !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  /* Analyze the pattern stmt instead of the original.  */
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            /* The original stmt is relevant too; revisit via the pattern
               stmt on the next iteration before advancing SI.  */
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple *pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  /* Find the next relevant/live def stmt in the sequence.  */
                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE
              /* MASK_STORE has no lhs, but is ok.  */
              && (!is_gimple_call (stmt)
                  || !gimple_call_internal_p (stmt)
                  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
            {
              if (is_gimple_call (stmt))
                {
                  /* Ignore calls with no lhs.  These must be calls to
                     #pragma omp simd functions, and what vectorization factor
                     it really needs can't be determined until
                     vectorizable_simd_clone_call.  */
                  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              /* Stmts already operating on vectors are not handled.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                }
              return false;
            }

          bool_result = false;

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
                scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
              else
                scalar_type = TREE_TYPE (gimple_get_lhs (stmt));

              /* Bool ops don't participate in vectorization factor
                 computation.  For comparison use compared types to
                 compute a factor.  */
              if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
                  && is_gimple_assign (stmt)
                  && gimple_assign_rhs_code (stmt) != COND_EXPR)
                {
                  if (STMT_VINFO_RELEVANT_P (stmt_info)
                      || STMT_VINFO_LIVE_P (stmt_info))
                    mask_producers.safe_push (stmt_info);
                  bool_result = true;

                  if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
                      == tcc_comparison
                      && !VECT_SCALAR_BOOLEAN_TYPE_P
                            (TREE_TYPE (gimple_assign_rhs1 (stmt))))
                    scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
                  else
                    {
                      if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                        {
                          pattern_def_seq = NULL;
                          gsi_next (&si);
                        }
                      continue;
                    }
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              /* Mask producers keep their vectype unset here; it is
                 computed in the mask_producers loop below.  */
              if (!bool_result)
                STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }

          /* Don't try to compute VF out scalar types if we stmt
             produces boolean vector.  Use result vectype instead.  */
          if (VECTOR_BOOLEAN_TYPE_P (vectype))
            vf_vectype = vectype;
          else
            {
              /* The vectorization factor is according to the smallest
                 scalar type (or the largest vector size, but we only
                 support one vector size per loop).  */
              if (!bool_result)
                scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                             &dummy);
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vf_vectype = get_vectype_for_scalar_type (scalar_type);
            }
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          /* Only a single vector size per loop is supported.  */
          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          /* Advance SI only when we are done with the original stmt, its
             pattern stmt and all pattern def stmts.  */
          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  /* Second phase: now that VF is fixed, compute mask (boolean vector)
     types for the deferred boolean-producing stmts.  */
  for (i = 0; i < mask_producers.length (); i++)
    {
      tree mask_type = NULL;

      stmt = STMT_VINFO_STMT (mask_producers[i]);

      if (is_gimple_assign (stmt)
          && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
          && !VECT_SCALAR_BOOLEAN_TYPE_P
                (TREE_TYPE (gimple_assign_rhs1 (stmt))))
        {
          /* Comparison of non-booleans: mask type follows the compared
             scalar type.  */
          scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          mask_type = get_mask_type_for_scalar_type (scalar_type);

          if (!mask_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported mask\n");
              return false;
            }
        }
      else
        {
          /* Otherwise derive the mask type from the operands' vector
             types; all operands must agree in subparts and maskness.  */
          tree rhs;
          ssa_op_iter iter;
          gimple *def_stmt;
          enum vect_def_type dt;

          FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
            {
              if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
                                       &def_stmt, &dt, &vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: can't compute mask type "
                                       "for statement, ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                        0);
                    }
                  return false;
                }

              /* No vectype probably means external definition.
                 Allow it in case there is another operand which
                 allows to determine mask type.  */
              if (!vectype)
                continue;

              if (!mask_type)
                mask_type = vectype;
              else if (TYPE_VECTOR_SUBPARTS (mask_type)
                       != TYPE_VECTOR_SUBPARTS (vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: different sized masks "
                                       "types in statement, ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         mask_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         vectype);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
                       != VECTOR_BOOLEAN_TYPE_P (vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: mixed mask and "
                                       "nonmask vector types in statement, ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         mask_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         vectype);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }

          /* We may compare boolean value loaded as vector of integers.
             Fix mask_type in such case.  */
          if (mask_type
              && !VECTOR_BOOLEAN_TYPE_P (mask_type)
              && gimple_code (stmt) == GIMPLE_ASSIGN
              && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
                 == tcc_comparison)
            mask_type = build_same_sized_truth_vector_type (mask_type);
        }

      /* No mask_type should mean loop invariant predicate.
         This is probably a subject for optimization in
         if-conversion.  */
      if (!mask_type)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not vectorized: can't compute mask type "
                               "for statement, ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                0);
            }
          return false;
        }

      STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
    }

  return true;
}

/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variables in the loop is
   considered a polynomial evolution.
*/ static bool vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, tree * step) { tree init_expr; tree step_expr; tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb); basic_block bb; /* When there is no evolution in this loop, the evolution function is not "simple". */ if (evolution_part == NULL_TREE) return false; /* When the evolution is a polynomial of degree >= 2 the evolution function is not "simple". */ if (tree_is_chrec (evolution_part)) return false; step_expr = evolution_part; init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "step: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr); dump_printf (MSG_NOTE, ", init: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr); dump_printf (MSG_NOTE, "\n"); } *init = init_expr; *step = step_expr; if (TREE_CODE (step_expr) != INTEGER_CST && (TREE_CODE (step_expr) != SSA_NAME || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr))) && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb)) || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr)) && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)) || !flag_associative_math))) && (TREE_CODE (step_expr) != REAL_CST || !flag_associative_math)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "step unknown.\n"); return false; } return true; } /* Function vect_analyze_scalar_cycles_1. Examine the cross iteration def-use cycles of scalar variables in LOOP. LOOP_VINFO represents the loop that is now being considered for vectorization (can be LOOP, or an outer-loop enclosing LOOP). 
*/

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  /* PHIs that are not simple inductions; reconsidered as reductions /
     nested cycles in the second phase below.  */
  auto_vec<gimple *, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          /* Record base and step for later use even if the evolution
             turns out not to be a simple IV.  */
          STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
            = initial_condition_in_loop_num (access_fn, loop->num);
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          /* Not a simple induction (in the outer-loop case, only
             constant-step IVs qualify) — defer to reduction analysis.  */
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
                  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple *phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple *reduc_stmt;
      bool nested_cycle;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc, false);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                             vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                           vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}

/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also to its
   inner-loop, if exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}

/* Transfer group and reduction information from STMT to its pattern stmt.  */

static void
vect_fixup_reduc_chain (gimple *stmt)
{
  gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
  gimple *stmtp;
  gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
              && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
  GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
  /* Walk the original chain; link each element's pattern stmt into a
     parallel chain headed by FIRSTP.  */
  do
    {
      stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      if (stmt)
        GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
          = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
    }
  while (stmt);
  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}

/* Fixup scalar cycles that now have their stmts detected as patterns.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  gimple *first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
      {
        gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
        while (next)
          {
            if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
              break;
            next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
          }
        /* If not all stmt in the chain are patterns try to handle
           the chain without patterns.  */
        if (! next)
          {
            vect_fixup_reduc_chain (first);
            LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
              = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
          }
      }
}

/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */

static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
                      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  /* Pessimistic defaults; overwritten below when analysis succeeds.  */
  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  if (!exit)
    return cond;

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
        {
          /* Try to combine may_be_zero with assumptions, this can simplify
             computation of niter expression.  */
          if (niter_assumptions && !integer_nonzerop (niter_assumptions))
            niter_assumptions
              = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                             niter_assumptions,
                             fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
                                          may_be_zero));
          else
            niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
                                 build_int_cst (TREE_TYPE (niter), 0),
                                 rewrite_to_non_trapping_overflow (niter));

          may_be_zero = NULL_TREE;
        }
      else if (integer_nonzerop (may_be_zero))
        {
          /* The loop body always executes exactly once.  */
          *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
          *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
          return cond;
        }
      else
        return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ???  For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
                          build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}

/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}

/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  res->kind = vec_info::loop;
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *phi = gsi_stmt (si);
          gimple_set_uid (phi, 0);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          gimple_set_uid (stmt, 0);
          set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would the same
     as reversed postorder traversal, so we are safe.  */

  /* Replace the unordered body array with a dfs-ordered one.  */
  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_NITERS_ASSUMPTIONS (res) = NULL;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = vNULL;
  LOOP_VINFO_DATAREFS (res) = vNULL;
  LOOP_VINFO_DDRS (res) = vNULL;
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) = vNULL;
  LOOP_VINFO_MAY_ALIAS_DDRS (res) = vNULL;
  LOOP_VINFO_GROUPED_STORES (res) = vNULL;
  LOOP_VINFO_REDUCTIONS (res) = vNULL;
  LOOP_VINFO_REDUCTION_CHAINS (res) = vNULL;
  LOOP_VINFO_SLP_INSTANCES (res) = vNULL;
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
  LOOP_VINFO_ORIG_LOOP_INFO (res) = NULL;

  return res;
}

/* Function destroy_loop_vec_info.
   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool swapped;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  /* CLEAN_STMTS == false skips the per-stmt cleanup loop entirely.  */
  nbbs = clean_stmts ? loop->num_nodes : 0;
  swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple *stmt = gsi_stmt (si);

          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
              else if (code == COND_EXPR
                       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
                {
                  tree cond_expr = gimple_assign_rhs1 (stmt);
                  enum tree_code cond_code = TREE_CODE (cond_expr);

                  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
                    {
                      /* Undoing the swap of the COND_EXPR arms requires
                         inverting the comparison; this is only possible
                         when the inversion is representable (NaNs).  */
                      bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
                                                                  0));
                      cond_code = invert_tree_comparison (cond_code,
                                                          honor_nans);
                      if (cond_code != ERROR_MARK)
                        {
                          TREE_SET_CODE (cond_expr, cond_code);
                          swap_ssa_operands (stmt,
                                             gimple_assign_rhs2_ptr (stmt),
                                             gimple_assign_rhs3_ptr (stmt));
                        }
                    }
                }
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  loop_vinfo->scalar_cost_vec.release ();

  free (loop_vinfo);

  /* aux pointed to this loop_vinfo — clear the dangling pointer.  */
  loop->aux = NULL;
}

/* Calculate the cost of one scalar iteration of the loop.  */
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
  int innerloop_iters, i;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      /* Inner-loop stmts are weighted by the assumed inner trip count.  */
      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            continue;

          /* NOTE(review): stmt_info is null-checked above but dereferenced
             unconditionally below — presumably every assign/call in the loop
             has a stmt_vec_info at this point; confirm against callers.  */
          vect_cost_for_stmt kind;
          if (STMT_VINFO_DATA_REF (stmt_info))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
               kind = scalar_load;
             else
               kind = scalar_store;
            }
          else
            kind = scalar_stmt;

          scalar_single_iter_cost
            += record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
                                 factor, kind, stmt_info, 0, vect_prologue);
        }
    }
  LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo)
    = scalar_single_iter_cost;
}

/* Function vect_analyze_loop_form_1.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e, a countable loop.  The
     niter could be analyzed under some assumptions.  */

bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
                          tree *assumptions, tree *number_of_iterationsm1,
                          tree *number_of_iterations, gcond **inner_loop_cond)
{
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).
Vectorizable inner-most loops look like this:

                     (pre-header)
                        |
                       header <--------+
                        | |            |
                        | +--> latch --+
                        |
                     (exit-bb)  */

      if (loop->num_nodes != 2)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      if (empty_block_p (loop->header))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: empty loop.\n");
	  return false;
	}
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
	 contains a single inner loop, and the number of BBs is exactly 5.
	 Vectorizable outer-loops look like this:

			(pre-header)
			   |
			  header <---+
			   |         |
		          inner-loop |
			   |         |
			  tail ------+
			   |
		        (exit-bb)

	 The inner-loop has the properties expected of inner-most loops
	 as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple nested loops.\n");
	  return false;
	}

      if (loop->num_nodes != 5)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      /* The inner loop must be entered from the outer-loop header and
	 exit into the block feeding the outer-loop latch.  */
      entryedge = loop_preheader_edge (innerloop);
      if (entryedge->src != loop->header
	  || !single_exit (innerloop)
	  || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: unsupported outerloop form.\n");
	  return false;
	}

      /* Analyze the inner-loop.  */
      tree inner_niterm1, inner_niter, inner_assumptions;
      if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
				      &inner_assumptions, &inner_niterm1,
				      &inner_niter, NULL)
	  /* Don't support analyzing niter under assumptions for inner
	     loop.  */
	  || !integer_onep (inner_assumptions))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: Bad inner loop.\n");
	  return false;
	}

      if (!expr_invariant_in_loop_p (loop, inner_niter))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: inner-loop count not"
			     " invariant.\n");
	  return false;
	}

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
			 "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: too many incoming edges.\n");
        }
      return false;
    }

  /* We assume that the loop exit condition is at the end of the loop. i.e,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: latch block not empty.\n");
      return false;
    }

  /* Make sure the exit is not abnormal.  */
  edge e = single_exit (loop);
  if (e->flags & EDGE_ABNORMAL)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: abnormal loop exit edge.\n");
      return false;
    }

  /* Extract the exit condition and the (symbolic) iteration counts.  */
  *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
				     number_of_iterationsm1);
  if (!*loop_cond)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: complicated exit condition.\n");
      return false;
    }

  if (integer_zerop (*assumptions)
      || !*number_of_iterations
      || chrec_contains_undetermined (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations cannot be "
			 "computed.\n");
      return false;
    }

  if (integer_zerop (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations = 0.\n");
      return false;
    }

  return true;
}

/* Analyze LOOP form and return a loop_vec_info if it is of suitable form.  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  tree assumptions, number_of_iterations, number_of_iterationsm1;
  gcond *loop_cond, *inner_loop_cond = NULL;

  if (! vect_analyze_loop_form_1 (loop, &loop_cond,
				  &assumptions, &number_of_iterationsm1,
				  &number_of_iterations, &inner_loop_cond))
    return NULL;

  loop_vec_info loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
  if (!integer_onep (assumptions))
    {
      /* We consider to vectorize this loop by versioning it under
	 some assumptions.  In order to do this, we need to clear
	 existing information computed by scev and niter analyzer.  */
      scev_reset_htab ();
      free_numbers_of_iterations_estimates_loop (loop);
      /* Also set flag for this loop so that following scev and niter
	 analysis are done under the assumptions.
*/
      loop_constraint_set (loop, LOOP_C_FINITE);
      /* Also record the assumptions for versioning.  */
      LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
			   "Symbolic number of iterations is ");
	  dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  /* Record that these conditions control the loop exit so they are
     handled specially by the vectorizer.  */
  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
  if (inner_loop_cond)
    STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
      = loop_exit_ctrl_vec_info_type;

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}



/* Scan the loop stmts and dependent on whether there are any (non-)SLP
   statements update the vectorization factor.  */

static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  unsigned int vectorization_factor;
  int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_update_vf_for_slp ===\n");

  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (vectorization_factor != 0);

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say, that we
     perform pure SLP on loop - cross iteration parallelism is not
     exploited.  */
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	  /* For pattern statements inspect the pattern stmt instead of
	     the original one.  */
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	      && STMT_VINFO_RELATED_STMT (stmt_info))
	    {
	      stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	    }
	  if ((STMT_VINFO_RELEVANT_P (stmt_info)
	       || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !PURE_SLP_STMT (stmt_info))
	    /* STMT needs both SLP and loop-based vectorization.  */
	    only_slp_in_loop = false;
	}
    }

  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor
      = least_common_multiple (vectorization_factor,
			       LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "Updating vectorization factor to %d\n",
		     vectorization_factor);
}

/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.
*/

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  bool ok;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_loop_operations ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
          gphi *phi = si.phi ();
          /* Assume the phi is supported until an analysis below fails.  */
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Unsupported loop-closed phi in "
				     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
		  gimple *op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
		  if (gimple_nop_p (op_def_stmt)
		      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
		      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
               || STMT_VINFO_LIVE_P (stmt_info))
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

	  if (ok && STMT_VINFO_LIVE_P (stmt_info))
	    ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);

          if (!ok)
            {
              if (dump_enabled_p ())
                {
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: relevant phi not "
				   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                }
	      return false;
            }
        }

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
	  if (!gimple_clobber_p (stmt)
	      && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
	    return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.
*/ if (!need_to_vectorize) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "All the computation can be taken out of the loop.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: redundant loop. no profit to " "vectorize.\n"); return false; } return true; } /* Function vect_analyze_loop_2. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. */ static bool vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal) { bool ok; int max_vf = MAX_VECTORIZATION_FACTOR; int min_vf = 2; unsigned int n_stmts = 0; /* The first group of checks is independent of the vector size. */ fatal = true; /* Find all data references in the loop (which correspond to vdefs/vuses) and analyze their evolution in the loop. */ basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); loop_p loop = LOOP_VINFO_LOOP (loop_vinfo); if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop nest containing two " "or more consecutive inner loops cannot be " "vectorized\n"); return false; } for (unsigned i = 0; i < loop->num_nodes; i++) for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; ++n_stmts; if (!find_data_references_in_stmt (loop, stmt, &LOOP_VINFO_DATAREFS (loop_vinfo))) { if (is_gimple_call (stmt) && loop->safelen) { tree fndecl = gimple_call_fndecl (stmt), op; if (fndecl != NULL_TREE) { cgraph_node *node = cgraph_node::get (fndecl); if (node != NULL && node->simd_clones != NULL) { unsigned int j, n = gimple_call_num_args (stmt); for (j = 0; j < n; j++) { op = gimple_call_arg (stmt, j); if (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))) break; } op = gimple_call_lhs (stmt); /* Ignore #pragma omp declare 
simd functions if they don't have data references in the call stmt itself. */ if (j == n && !(op && (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))))) continue; } } } if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop contains function " "calls or data references that cannot " "be analyzed\n"); return false; } } /* Analyze the data references and also adjust the minimal vectorization factor according to the loads and stores. */ ok = vect_analyze_data_refs (loop_vinfo, &min_vf); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data references.\n"); return false; } /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); vect_fixup_scalar_cycles_with_patterns (loop_vinfo); /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ ok = vect_analyze_data_ref_accesses (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data access.\n"); return false; } /* Data-flow analysis to detect stmts that do not need to be vectorized. */ ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unexpected pattern.\n"); return false; } /* While the rest of the analysis below depends on it in some way. */ fatal = false; /* Analyze data dependences between the data-refs in the loop and adjust the maximum vectorization factor according to the dependences. FORNOW: fail at the first data dependence that we encounter. 
*/ ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); if (!ok || max_vf < min_vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } ok = vect_determine_vectorization_factor (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't determine vectorization factor.\n"); return false; } if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } /* Compute the scalar iteration cost. */ vect_compute_single_scalar_iteration_cost (loop_vinfo); int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); HOST_WIDE_INT estimated_niter; unsigned th; int min_scalar_loop_bound; /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ ok = vect_analyze_slp (loop_vinfo, n_stmts); if (!ok) return false; /* If there are any SLP instances mark them as pure_slp. */ bool slp = vect_make_slp_decision (loop_vinfo); if (slp) { /* Find stmts that need to be both vectorized and SLPed. */ vect_detect_hybrid_slp (loop_vinfo); /* Update the vectorization factor based on the SLP decision. */ vect_update_vf_for_slp (loop_vinfo); } /* This is the point where we can re-start analysis with SLP forced off. */ start_over: /* Now the vectorization factor is final. 
*/ unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (vectorization_factor != 0); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo)); HOST_WIDE_INT max_niter = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor)) || (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: iteration count smaller than " "vectorization factor.\n"); return false; } /* Analyze the alignment of the data-refs in the loop. Fail if a data reference is found that cannot be vectorized. */ ok = vect_analyze_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } /* Prune the list of ddrs to be tested at run-time by versioning for alias. It is important to call pruning after vect_analyze_data_ref_accesses, since we use grouping information gathered by interleaving analysis. */ ok = vect_prune_runtime_alias_test_list (loop_vinfo); if (!ok) return false; /* Do not invoke vect_enhance_data_refs_alignment for eplilogue vectorization. */ if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { /* This pass will decide on using loop versioning and/or loop peeling in order to enhance the alignment of data references in the loop. */ ok = vect_enhance_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } } if (slp) { /* Analyze operations in the SLP instances. 
Note this may remove unsupported SLP instances which makes the above SLP kind detection invalid. */ unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length (); vect_slp_analyze_operations (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size) goto again; } /* Scan all the remaining operations in the loop that are not subject to SLP and make sure they are vectorizable. */ ok = vect_analyze_loop_operations (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad operation or unsupported loop bound.\n"); return false; } /* If epilog loop is required because of data accesses with gaps, one additional iteration needs to be peeled. Check if there is enough iterations for vectorization. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo); if (wi::to_widest (scalar_niters) < vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop has no enough iterations to support" " peeling for gaps.\n"); return false; } } /* Analyze cost. Decide if worth while to vectorize. */ int min_profitable_estimate, min_profitable_iters; vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, &min_profitable_estimate); if (min_profitable_iters < 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector version will never be " "profitable.\n"); goto again; } min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) * vectorization_factor) - 1); /* Use the cost model only if it is more conservative than user specified threshold. 
*/ th = (unsigned) min_scalar_loop_bound; if (min_profitable_iters && (!min_scalar_loop_bound || min_profitable_iters > min_scalar_loop_bound)) th = (unsigned) min_profitable_iters; LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: iteration count smaller than user " "specified loop bound parameter or minimum profitable " "iterations (whichever is more conservative).\n"); goto again; } estimated_niter = estimated_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); if (estimated_niter == -1) estimated_niter = max_niter; if (estimated_niter != -1 && ((unsigned HOST_WIDE_INT) estimated_niter <= MAX (th, (unsigned)min_profitable_estimate))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: estimated iteration count too " "small.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: estimated iteration count smaller " "than specified loop bound parameter or minimum " "profitable iterations (whichever is more " "conservative).\n"); goto again; } /* Decide whether we need to create an epilogue loop to handle remaining scalar iterations. 
*/ th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1) / LOOP_VINFO_VECT_FACTOR (loop_vinfo)) * LOOP_VINFO_VECT_FACTOR (loop_vinfo); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0) { if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)) < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; } else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)) /* In case of versioning, check if the maximum number of iterations is greater than th. If they are identical, the epilogue is unnecessary. */ && (!LOOP_REQUIRES_VERSIONING (loop_vinfo) || (unsigned HOST_WIDE_INT) max_niter > th))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; /* If an epilogue loop is required make sure we can create one. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); if (!vect_can_advance_ivs_p (loop_vinfo) || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), single_exit (LOOP_VINFO_LOOP (loop_vinfo)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't create required " "epilog loop\n"); goto again; } } gcc_assert (vectorization_factor == (unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)); /* Ok to vectorize! */ return true; again: /* Try again with SLP forced off but if we didn't do any SLP there is no point in re-trying. */ if (!slp) return false; /* If there are reduction chains re-trying will fail anyway. */ if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) return false; /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. 
*/ slp_instance instance; slp_tree node; unsigned i, j; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) { stmt_vec_info vinfo; vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]); if (! STMT_VINFO_GROUPED_ACCESS (vinfo)) continue; vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_store_lanes_supported (vectype, size) && ! vect_grouped_store_supported (vectype, size)) return false; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node) { vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]); vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo); size = STMT_VINFO_GROUP_SIZE (vinfo); vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_load_lanes_supported (vectype, size) && ! vect_grouped_load_supported (vectype, single_element_p, size)) return false; } } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "re-trying with SLP disabled\n"); /* Roll back state appropriately. No SLP this time. */ slp = false; /* Restore vectorization factor as it were without SLP. */ LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor; /* Free the SLP instances. */ FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Reset SLP type to loop_vect on all stmts. 
*/ for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i) { basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i]; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si)); STMT_SLP_TYPE (stmt_info) = loop_vect; if (STMT_VINFO_IN_PATTERN_P (stmt_info)) { stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); STMT_SLP_TYPE (stmt_info) = loop_vect; for (gimple_stmt_iterator pi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)); !gsi_end_p (pi); gsi_next (&pi)) { gimple *pstmt = gsi_stmt (pi); STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect; } } } } /* Free optimized alias test DDRS. */ LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release (); /* Reset target cost data. */ destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); LOOP_VINFO_TARGET_COST_DATA (loop_vinfo) = init_cost (LOOP_VINFO_LOOP (loop_vinfo)); /* Reset assorted flags. */ LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false; LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0; goto start_over; } /* Function vect_analyze_loop. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must be vectorized. */ loop_vec_info vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo) { loop_vec_info loop_vinfo; unsigned int vector_sizes; /* Autodetect first vector size we try. 
*/
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "===== analyze_loop_nest =====\n");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop already vectorized.\n");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad loop form.\n");
	  return NULL;
	}

      bool fatal = false;

      if (orig_loop_vinfo)
	LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal))
	{
	  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

	  return loop_vinfo;
	}

      destroy_loop_vec_info (loop_vinfo, true);

      /* Drop the size we just tried from the candidate set.  */
      vector_sizes &= ~current_vector_size;
      if (fatal
	  || vector_sizes == 0
	  || current_vector_size == 0)
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "***** Re-trying analysis with "
			 "vector size %d\n", current_vector_size);
    }
}


/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operations.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result, or ERROR_MARK
      if the operation is a supported reduction operation, but does not have
      such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as reduction.
*/

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      /* Supported as reductions, but no direct reduction tree-code.  */
      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}


/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (int msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}


/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
		       gimple *first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  /* Walk the use-def chain from PHI, requiring every link to be the same
     operation CODE, until we come back to PHI.  */
  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

          /* Check if we got back to the reduction phi.  */
	  if (use_stmt == phi)
            {
	      loop_use_stmt = use_stmt;
              found = true;
	      break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
	      loop_use_stmt = use_stmt;
	      nloop_uses++;
            }
           else
             n_out_of_loop_uses++;

           /* There can be either a single use in the loop or two uses in
              phi nodes.  */
           if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
             return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
	return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
	  || code != gimple_assign_rhs_code (loop_use_stmt)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
	  GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
	GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
   }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
	{
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                                  == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
	    {
	      lhs = gimple_assign_lhs (next_stmt);
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      continue;
	    }

	  return false;
	}
      else
	{
          tree op = gimple_assign_rhs2 (next_stmt);
	  gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                              == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                                  == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
		}

	      swap_ssa_operands (next_stmt,
				 gimple_assign_rhs1_ptr (next_stmt),
                                 gimple_assign_rhs2_ptr (next_stmt));
	      update_stmt (next_stmt);

	      if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
		LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	    }
	  else
	    return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}


/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
	ret_val = a[i];

*/

static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
			  bool check_reduction, bool *double_reduc,
			  bool need_wrapping_integral_overflow,
			  enum vect_reduction_type *v_reduc_type)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;

  /* If CHECK_REDUCTION is true, we assume inner-most loop
vectorization, otherwise, we assume outer loop vectorization. */ gcc_assert ((check_reduction && loop == vect_loop) || (!check_reduction && flow_loop_nested_p (vect_loop, loop))); name = PHI_RESULT (phi); /* ??? If there are no uses of the PHI result the inner loop reduction won't be detected as possibly double-reduction by vectorizable_reduction because that tries to walk the PHI arg from the preheader edge which can be constant. See PR60382. */ if (has_zero_uses (name)) return NULL; nloop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "intermediate value used outside loop.\n"); return NULL; } nloop_uses++; if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction used in loop.\n"); return NULL; } phi_use_stmt = use_stmt; } if (TREE_CODE (loop_arg) != SSA_NAME) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: not ssa_name: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return NULL; } def_stmt = SSA_NAME_DEF_STMT (loop_arg); if (!def_stmt) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: no def_stmt.\n"); return NULL; } if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI) { if (dump_enabled_p ()) dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0); return NULL; } if (is_gimple_assign (def_stmt)) { name = gimple_assign_lhs (def_stmt); phi_def = false; } else { name = PHI_RESULT (def_stmt); phi_def = true; } nloop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) 
nloop_uses++; if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction used in loop.\n"); return NULL; } } /* If DEF_STMT is a phi node itself, we expect it to have a single argument defined in the inner loop. */ if (phi_def) { op1 = PHI_ARG_DEF (def_stmt, 0); if (gimple_phi_num_args (def_stmt) != 1 || TREE_CODE (op1) != SSA_NAME) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported phi node definition.\n"); return NULL; } def1 = SSA_NAME_DEF_STMT (op1); if (gimple_bb (def1) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && loop->inner && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1)) && is_gimple_assign (def1) && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected double reduction: "); *double_reduc = true; return def_stmt; } return NULL; } code = orig_code = gimple_assign_rhs_code (def_stmt); /* We can handle "res -= x[i]", which is non-associative by simply rewriting this into "res += -x[i]". Avoid changing gimple instruction for the first simple tests and only do this if we're allowed to change code at all. 
*/ if (code == MINUS_EXPR && (op1 = gimple_assign_rhs1 (def_stmt)) && TREE_CODE (op1) == SSA_NAME && SSA_NAME_DEF_STMT (op1) == phi) code = PLUS_EXPR; if (code == COND_EXPR) { if (check_reduction) *v_reduc_type = COND_REDUCTION; } else if (!commutative_tree_code (code) || !associative_tree_code (code)) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not commutative/associative: "); return NULL; } if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS) { if (code != COND_EXPR) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not binary operation: "); return NULL; } op3 = gimple_assign_rhs1 (def_stmt); if (COMPARISON_CLASS_P (op3)) { op4 = TREE_OPERAND (op3, 1); op3 = TREE_OPERAND (op3, 0); } op1 = gimple_assign_rhs2 (def_stmt); op2 = gimple_assign_rhs3 (def_stmt); if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: uses not ssa_names: "); return NULL; } } else { op1 = gimple_assign_rhs1 (def_stmt); op2 = gimple_assign_rhs2 (def_stmt); if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: uses not ssa_names: "); return NULL; } } type = TREE_TYPE (gimple_assign_lhs (def_stmt)); if ((TREE_CODE (op1) == SSA_NAME && !types_compatible_p (type,TREE_TYPE (op1))) || (TREE_CODE (op2) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op2))) || (op3 && TREE_CODE (op3) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op3))) || (op4 && TREE_CODE (op4) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op4)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "reduction: multiple types: operation type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, type); dump_printf (MSG_NOTE, ", operands types: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op1)); dump_printf (MSG_NOTE, ","); 
dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op2)); if (op3) { dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op3)); } if (op4) { dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op4)); } dump_printf (MSG_NOTE, "\n"); } return NULL; } /* Check that it's ok to change the order of the computation. Generally, when vectorizing a reduction we change the order of the computation. This may change the behavior of the program in some cases, so we need to check that this is ok. One exception is when vectorizing an outer-loop: the inner-loop is executed sequentially, and therefore vectorizing reductions in the inner-loop during outer-loop vectorization is safe. */ if (*v_reduc_type != COND_REDUCTION && check_reduction) { /* CHECKME: check for !flag_finite_math_only too? */ if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math) { /* Changing the order of operations changes the semantics. */ if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unsafe fp math optimization: "); return NULL; } else if (INTEGRAL_TYPE_P (type)) { if (!operation_no_trapping_overflow (type, code)) { /* Changing the order of operations changes the semantics. */ if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unsafe int math optimization" " (overflow traps): "); return NULL; } if (need_wrapping_integral_overflow && !TYPE_OVERFLOW_WRAPS (type) && operation_can_overflow (code)) { /* Changing the order of operations changes the semantics. */ if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unsafe int math optimization" " (overflow doesn't wrap): "); return NULL; } } else if (SAT_FIXED_POINT_TYPE_P (type)) { /* Changing the order of operations changes the semantics. */ if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unsafe fixed-point math optimization: "); return NULL; } } /* Reduction is safe. 
We're dealing with one of the following: 1) integer arithmetic and no trapv 2) floating point arithmetic, and special flags permit this optimization 3) nested cycle (i.e., outer loop vectorization). */ if (TREE_CODE (op1) == SSA_NAME) def1 = SSA_NAME_DEF_STMT (op1); if (TREE_CODE (op2) == SSA_NAME) def2 = SSA_NAME_DEF_STMT (op2); if (code != COND_EXPR && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2)))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: "); return NULL; } /* Check that one def is the reduction def, defined by PHI, the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). */ if (def2 && def2 == phi && (code == COND_EXPR || !def1 || gimple_nop_p (def1) || !flow_bb_inside_loop_p (loop, gimple_bb (def1)) || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1)) && (is_gimple_assign (def1) || is_gimple_call (def1) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def || (gimple_code (def1) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def1))))))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); return def_stmt; } if (def1 && def1 == phi && (code == COND_EXPR || !def2 || gimple_nop_p (def2) || !flow_bb_inside_loop_p (loop, gimple_bb (def2)) || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2)) && (is_gimple_assign (def2) || is_gimple_call (def2) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def || (gimple_code (def2) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def2))))))) { if (check_reduction && orig_code != MINUS_EXPR) { /* Check if we can swap operands (just for simplicity - so that the rest of the code can assume that the reduction variable is always the last (second) argument). 
*/ if (code == COND_EXPR) { /* Swap cond_expr by inverting the condition. */ tree cond_expr = gimple_assign_rhs1 (def_stmt); enum tree_code invert_code = ERROR_MARK; enum tree_code cond_code = TREE_CODE (cond_expr); if (TREE_CODE_CLASS (cond_code) == tcc_comparison) { bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0)); invert_code = invert_tree_comparison (cond_code, honor_nans); } if (invert_code != ERROR_MARK) { TREE_SET_CODE (cond_expr, invert_code); swap_ssa_operands (def_stmt, gimple_assign_rhs2_ptr (def_stmt), gimple_assign_rhs3_ptr (def_stmt)); } else { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: cannot swap operands " "for cond_expr"); return NULL; } } else swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt), gimple_assign_rhs2_ptr (def_stmt)); if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: need to swap operands: "); if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt))) LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; } else { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); } return def_stmt; } /* Try to find SLP reduction chain. */ if (check_reduction && code != COND_EXPR && vect_is_slp_reduction (loop_info, phi, def_stmt)) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "reduction: detected reduction chain: "); return def_stmt; } if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unknown pattern: "); return NULL; } /* Wrapper around vect_is_simple_reduction_1, which will modify code in-place if it enables detection of more reductions. Arguments as there. 
*/ gimple * vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi, bool check_reduction, bool *double_reduc, bool need_wrapping_integral_overflow) { enum vect_reduction_type v_reduc_type; return vect_is_simple_reduction (loop_info, phi, check_reduction, double_reduc, need_wrapping_integral_overflow, &v_reduc_type); } /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */ int vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue, int *peel_iters_epilogue, stmt_vector_for_cost *scalar_cost_vec, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *epilogue_cost_vec) { int retval = 0; int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { *peel_iters_epilogue = vf/2; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "cost model: epilogue peel iters set to vf/2 " "because loop iterations are unknown .\n"); /* If peeled iterations are known but number of scalar loop iterations are unknown, count a taken branch per peeled loop. */ retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken, NULL, 0, vect_prologue); retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken, NULL, 0, vect_epilogue); } else { int niters = LOOP_VINFO_INT_NITERS (loop_vinfo); peel_iters_prologue = niters < peel_iters_prologue ? niters : peel_iters_prologue; *peel_iters_epilogue = (niters - peel_iters_prologue) % vf; /* If we need to peel for gaps, but no peeling is required, we have to peel VF iterations. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue) *peel_iters_epilogue = vf; } stmt_info_for_cost *si; int j; if (peel_iters_prologue) FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) { stmt_vec_info stmt_info = si->stmt ? 
vinfo_for_stmt (si->stmt) : NULL; retval += record_stmt_cost (prologue_cost_vec, si->count * peel_iters_prologue, si->kind, stmt_info, si->misalign, vect_prologue); } if (*peel_iters_epilogue) FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) { stmt_vec_info stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; retval += record_stmt_cost (epilogue_cost_vec, si->count * *peel_iters_epilogue, si->kind, stmt_info, si->misalign, vect_epilogue); } return retval; } /* Function vect_estimate_min_profitable_iters Return the number of iterations required for the vector version of the loop to be profitable relative to the cost of the scalar version of the loop. *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold of iterations for vectorization. -1 value means loop vectorization is not profitable. This returned value may be used for dynamic profitability check. *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used for static check against estimated number of iterations. */ static void vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, int *ret_min_profitable_niters, int *ret_min_profitable_estimate) { int min_profitable_iters; int min_profitable_estimate; int peel_iters_prologue; int peel_iters_epilogue; unsigned vec_inside_cost = 0; int vec_outside_cost = 0; unsigned vec_prologue_cost = 0; unsigned vec_epilogue_cost = 0; int scalar_single_iter_cost = 0; int scalar_outside_cost = 0; int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); /* Cost model disabled. */ if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) { dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n"); *ret_min_profitable_niters = 0; *ret_min_profitable_estimate = 0; return; } /* Requires loop versioning tests to handle misalignment. 
*/ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning to treat misalignment.\n"); } /* Requires loop versioning with alias checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning aliasing.\n"); } /* Requires loop versioning with niter checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning niters.\n"); } if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); /* Count statements in scalar loop. Using this as scalar cost for a single iteration for now. TODO: Add outer loop support. TODO: Consider assigning different costs to different scalar statements. */ scalar_single_iter_cost = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo); /* Add additional cost for the peeled instructions in prologue and epilogue loop. FORNOW: If we don't know the value of peel_iters for prologue or epilogue at compile-time - we assume it's vf/2 (the worst would be vf-1). TODO: Build an expression that represents peel_iters for prologue and epilogue to be used in a run-time test. 
*/ if (npeel < 0) { peel_iters_prologue = vf/2; dump_printf (MSG_NOTE, "cost model: " "prologue peel iters set to vf/2.\n"); /* If peeling for alignment is unknown, loop bound of main loop becomes unknown. */ peel_iters_epilogue = vf/2; dump_printf (MSG_NOTE, "cost model: " "epilogue peel iters set to vf/2 because " "peeling for alignment is unknown.\n"); /* If peeled iterations are unknown, count a taken branch and a not taken branch per peeled loop. Even if scalar loop iterations are known, vector iterations are not known since peeled prologue iterations are not known. Hence guards remain the same. */ (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_epilogue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_epilogue); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count * peel_iters_prologue, si->kind, stmt_info, si->misalign, vect_prologue); (void) add_stmt_cost (target_cost_data, si->count * peel_iters_epilogue, si->kind, stmt_info, si->misalign, vect_epilogue); } } else { stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; stmt_info_for_cost *si; int j; void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); prologue_cost_vec.create (2); epilogue_cost_vec.create (2); peel_iters_prologue = npeel; (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue, &peel_iters_epilogue, &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), &prologue_cost_vec, &epilogue_cost_vec); FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? 
vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_prologue); } FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_epilogue); } prologue_cost_vec.release (); epilogue_cost_vec.release (); } /* FORNOW: The scalar outside cost is incremented in one of the following ways: 1. The vectorizer checks for alignment and aliasing and generates a condition that allows dynamic vectorization. A cost model check is ANDED with the versioning condition. Hence scalar code path now has the added cost of the versioning check. if (cost > th & versioning_check) jmp to vector code Hence run-time scalar is incremented by not-taken branch cost. 2. The vectorizer then checks if a prologue is required. If the cost model check was not done before during versioning, it has to be done before the prologue check. if (cost <= th) prologue = scalar_iters if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit Hence the run-time scalar cost is incremented by a taken branch, plus a not-taken branch, plus a taken branch cost. 3. The vectorizer then checks if an epilogue is required. If the cost model check was not done before during prologue check, it has to be done with the epilogue check. if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit vector code: if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) jmp to epilogue Hence the run-time scalar cost should be incremented by 2 taken branches. TODO: The back end may reorder the BBS's differently and reverse conditions/branch directions. Change the estimates below to something more reasonable. */ /* If the number of iterations is known and we do not do versioning, we can decide whether to vectorize at compile time. 
Hence the scalar version do not carry cost model guard costs. */ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || LOOP_REQUIRES_VERSIONING (loop_vinfo)) { /* Cost model check occurs at versioning. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken); else { /* Cost model check occurs at prologue generation. */ if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken) + vect_get_stmt_cost (cond_branch_not_taken); /* Cost model check occurs at epilogue generation. */ else scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); } } /* Complete the target-specific cost calculations. */ finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost, &vec_inside_cost, &vec_epilogue_cost); vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n"); dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n", vec_inside_cost); dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost); dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost); dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n", scalar_single_iter_cost); dump_printf (MSG_NOTE, " Scalar outside cost: %d\n", scalar_outside_cost); dump_printf (MSG_NOTE, " Vector outside cost: %d\n", vec_outside_cost); dump_printf (MSG_NOTE, " prologue iterations: %d\n", peel_iters_prologue); dump_printf (MSG_NOTE, " epilogue iterations: %d\n", peel_iters_epilogue); } /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. 
The following condition must hold true: SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC where SIC = scalar iteration cost, VIC = vector iteration cost, VOC = vector outside cost, VF = vectorization factor, PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations SOC = scalar outside cost for run time cost model check. */ if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost) { if (vec_outside_cost <= 0) min_profitable_iters = 1; else { min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf - vec_inside_cost * peel_iters_prologue - vec_inside_cost * peel_iters_epilogue) / ((scalar_single_iter_cost * vf) - vec_inside_cost); if ((scalar_single_iter_cost * vf * min_profitable_iters) <= (((int) vec_inside_cost * min_profitable_iters) + (((int) vec_outside_cost - scalar_outside_cost) * vf))) min_profitable_iters++; } } /* vector version will never be profitable. */ else { if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize) warning_at (vect_location, OPT_Wopenmp_simd, "vectorization " "did not happen for a simd loop"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "cost model: the vector iteration cost = %d " "divided by the scalar iteration cost = %d " "is greater or equal to the vectorization factor = %d" ".\n", vec_inside_cost, scalar_single_iter_cost, vf); *ret_min_profitable_niters = -1; *ret_min_profitable_estimate = -1; return; } dump_printf (MSG_NOTE, " Calculated minimum iters for profitability: %d\n", min_profitable_iters); min_profitable_iters = min_profitable_iters < vf ? vf : min_profitable_iters; /* Because the condition we create is: if (niters <= min_profitable_iters) then skip the vectorized loop. 
*/ min_profitable_iters--; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, " Runtime profitability threshold = %d\n", min_profitable_iters); *ret_min_profitable_niters = min_profitable_iters; /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. Non-vectorized variant is SIC * niters and it must win over vector variant on the expected loop trip count. The following condition must hold true: SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */ if (vec_outside_cost <= 0) min_profitable_estimate = 1; else { min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf - vec_inside_cost * peel_iters_prologue - vec_inside_cost * peel_iters_epilogue) / ((scalar_single_iter_cost * vf) - vec_inside_cost); } min_profitable_estimate --; min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, " Static estimate profitability threshold = %d\n", min_profitable_estimate); *ret_min_profitable_estimate = min_profitable_estimate; } /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET vector elements (not bits) for a vector of mode MODE. */ static void calc_vec_perm_mask_for_shift (enum machine_mode mode, unsigned int offset, unsigned char *sel) { unsigned int i, nelt = GET_MODE_NUNITS (mode); for (i = 0; i < nelt; i++) sel[i] = (i + offset) & (2*nelt - 1); } /* Checks whether the target supports whole-vector shifts for vectors of mode MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_ it supports vec_perm_const with masks for all necessary shift amounts. 
*/ static bool have_whole_vector_shift (enum machine_mode mode) { if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) return true; if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing) return false; unsigned int i, nelt = GET_MODE_NUNITS (mode); unsigned char *sel = XALLOCAVEC (unsigned char, nelt); for (i = nelt/2; i >= 1; i/=2) { calc_vec_perm_mask_for_shift (mode, i, sel); if (!can_vec_perm_p (mode, false, sel)) return false; } return true; } /* Return the reduction operand (with index REDUC_INDEX) of STMT. */ static tree get_reduction_op (gimple *stmt, int reduc_index) { switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) { case GIMPLE_SINGLE_RHS: gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op); return TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index); case GIMPLE_UNARY_RHS: return gimple_assign_rhs1 (stmt); case GIMPLE_BINARY_RHS: return (reduc_index ? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt)); case GIMPLE_TERNARY_RHS: return gimple_op (stmt, reduc_index + 1); default: gcc_unreachable (); } } /* TODO: Close dependency between vect_model_*_cost and vectorizable_* functions. Design better to avoid maintenance issues. */ /* Function vect_model_reduction_cost. Models cost for a reduction operation, including the vector ops generated within the strip-mine loop, the initial definition before the loop, and the epilogue code that must be generated. 
*/

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
			   int ncopies, int reduc_index)
{
  int prologue_cost = 0, epilogue_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *stmt, *orig_stmt;
  tree reduction_op;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  void *target_cost_data;

  /* Accumulate costs against the loop's cost data when vectorizing a
     loop, otherwise against the basic block's (BB SLP).  */
  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    }
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO
						  (stmt_info));

  /* Condition reductions generate two reductions in the loop.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    ncopies *= 2;

  /* Cost of reduction op inside loop.  */
  unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies,
					vector_stmt, stmt_info, 0, vect_body);
  stmt = STMT_VINFO_STMT (stmt_info);

  reduction_op = get_reduction_op (stmt, reduc_index);

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      /* No vector type for the reduction operand's scalar type: the
	 reduction cannot be vectorized at all.  */
      if (dump_enabled_p ())
        {
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (reduction_op));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
   }

  mode = TYPE_MODE (vectype);

  /* When STMT is a pattern stmt, the tree-code is taken from the
     original stmt that the pattern replaces.  */
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.
     For cond reduction we have four vectors: initial index, step, initial
     result of the data reduction, initial value of the index reduction.  */
  int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
		       == COND_REDUCTION ? 4 : 1;
  prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
				  scalar_to_vec, stmt_info, 0,
				  vect_prologue);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  /* No epilogue is needed for a reduction nested in an inner loop of
     the loop being vectorized (the vector of partial results feeds the
     outer loop directly).  */
  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
	{
	  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
	    {
	      /* An EQ stmt and an COND_EXPR stmt.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      /* Reduction of the max index and a reduction of the found
		 values.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 2,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	      /* A broadcast of the max value.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      scalar_to_vec, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    {
	      /* Scheme 1: one direct vector reduction plus one scalar
		 extract.  */
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	}
      else
	{
	  int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
	  tree bitsize =
	    TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
	  int element_bitsize = tree_to_uhwi (bitsize);
	  int nelements = vec_size_in_bits / element_bitsize;

	  optab = optab_for_tree_code (code, vectype, optab_default);

	  /* We have a whole vector shift available.  */
	  if (VECTOR_MODE_P (mode)
	      && optab_handler (optab, mode) != CODE_FOR_nothing
	      && have_whole_vector_shift (mode))
	    {
	      /* Final reduction via vector shifts and the reduction operator.
		 Also requires scalar extract.  log2(nelements) shift/op
		 pairs, hence the factor of 2.  */
	      epilogue_cost += add_stmt_cost (target_cost_data,
					      exact_log2 (nelements) * 2,
					      vector_stmt, stmt_info, 0,
					      vect_epilogue);
	      epilogue_cost += add_stmt_cost (target_cost_data, 1,
					      vec_to_scalar, stmt_info, 0,
					      vect_epilogue);
	    }
	  else
	    /* Use extracts and reduction op for final reduction.  For N
	       elements, we have N extracts and N-1 reduction ops.  */
	    epilogue_cost += add_stmt_cost (target_cost_data,
					    nelements + nelements - 1,
					    vector_stmt, stmt_info, 0,
					    vect_epilogue);
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
                 prologue_cost, epilogue_cost);

  return true;
}


/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  /* loop cost for vec_loop.  One vector iv update per copy.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
			       stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
				 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].
*/

static tree
get_initial_def_for_induction (gimple *iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_name;
  gimple *new_stmt;
  gphi *induction_phi;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple *exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;
  tree resvectype;

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  /* The evolution part (step S) was computed by the analysis phase.  */
  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
				     loop_preheader_edge (iv_loop));

  vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
  resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Convert the step to the desired type.  */
  stmts = NULL;
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
  if (stmts)
    {
      /* Any conversion stmts go on the preheader edge; the insertion
	 must not split the edge (hence the !new_bb assert).  */
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
	 been created during vectorization of previous stmts.  We obtain it
	 from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
	{
	  new_stmt
	    = gimple_build_assign (vect_get_new_ssa_name (vectype,
							  vect_simple_var,
							  "vec_iv_"),
				   VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR, vectype,
					   vec_init));
	  vec_init = gimple_assign_lhs (new_stmt);
	  new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
						 new_stmt);
	  gcc_assert (!new_bb);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	}
    }
  else
    {
      vec<constructor_elt, va_gc> *v;

      /* iv_loop is the loop to be vectorized. Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      vec_alloc (v, nunits);
      /* Track whether all elements stay invariant so we can build a
	 VECTOR_CST instead of a CONSTRUCTOR.  */
      bool constant_p = is_gimple_min_invariant (new_name);
      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
      for (i = 1; i < nunits; i++)
	{
	  /* Create: new_name_i = new_name + step_expr  */
	  new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				   new_name, step_expr);
	  if (!is_gimple_min_invariant (new_name))
	    constant_p = false;
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
	}
      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}

      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      if (constant_p)
	new_vec = build_vector_from_ctor (vectype, v);
      else
	new_vec = build_constructor (vectype, v);
      vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
    }


  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized. Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
	  vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  /* Build VF as an integer first, then convert, since float
	     types have no direct build_int_cst.  */
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (iv_phi, new_name,
				     TREE_TYPE (step_expr), NULL);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  new_vec = build_vector_from_val (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
	 vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
		      new_stmt_vec_info (induction_phi, loop_vinfo));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR, induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.
	 Here the step per copy is nunits*S, not VF*S as above.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (iv_phi, new_name,
				     TREE_TYPE (step_expr), NULL);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  if (!useless_type_conversion_p (resvectype, vectype))
	    {
	      /* Result type differs from the working vector type:
		 view-convert each copy to the result vector type.  */
	      new_stmt
		= gimple_build_assign
			(vect_get_new_vect_var (resvectype, vect_simple_var,
						"vec_iv_"),
			 VIEW_CONVERT_EXPR,
			 build1 (VIEW_CONVERT_EXPR, resvectype,
				 gimple_assign_lhs (new_stmt)));
	      gimple_assign_set_lhs (new_stmt,
				     make_ssa_name
				       (gimple_assign_lhs (new_stmt),
					new_stmt));
	      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	    }
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	  /* Chain the copies through STMT_VINFO_RELATED_STMT.  */
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
	  prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
	}
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
        }
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop
	     induction is not used in the outer-loop (i.e. only outside the
	     outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "vector of inductions after inner-loop:");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
	    }
	}
    }


  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			SSA_NAME_DEF_STMT (vec_def), 0);
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  if (!useless_type_conversion_p (resvectype, vectype))
    {
      /* As above: expose the induction result in the result vector type.  */
      new_stmt = gimple_build_assign (vect_get_new_vect_var (resvectype,
							     vect_simple_var,
							     "vec_iv_"),
				      VIEW_CONVERT_EXPR,
				      build1 (VIEW_CONVERT_EXPR, resvectype,
					      induc_def));
      induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
      gimple_assign_set_lhs (new_stmt, induc_def);
      si = gsi_after_labels (bb);
      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
      set_vinfo_for_stmt (new_stmt,
			  new_stmt_vec_info (new_stmt, loop_vinfo));
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
	= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
    }

  return induc_def;
}


/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
	of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
	performs. This vector will be used as the initial value of the
	vector of partial results.
   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we use Option1 (the 'adjust in epilog' scheme) when
   ADJUSTMENT_DEF is not NULL, because its initialization vector is
   simpler (the same element in all entries), and Option2 otherwise.
   A cost model should help decide between these two schemes.
*/

tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
			       tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree *elts;
  int i;
  bool nested_in_vect_loop = false;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple *def_stmt = NULL;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
	      || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
	  == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  /* In case of a nested reduction do not use an adjustment def as
     that case is not supported by the epilogue generation correctly
     if ncopies is not one.  */
  if (adjustment_def && nested_in_vect_loop)
    {
      *adjustment_def = NULL;
      return vect_get_vec_def_for_operand (init_val, stmt);
    }

  switch (code)
    {
      case WIDEN_SUM_EXPR:
      case DOT_PROD_EXPR:
      case SAD_EXPR:
      case PLUS_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case MULT_EXPR:
      case BIT_AND_EXPR:
	/* ADJUSTMENT_DEF is NULL when called from
	   vect_create_epilog_for_reduction to vectorize double reduction.  */
	if (adjustment_def)
	  *adjustment_def = init_val;

	/* The neutral element is 1 for MULT, -1 (all bits set) for
	   BIT_AND, and 0 for everything else.  */
	if (code == MULT_EXPR)
	  {
	    real_init_val = dconst1;
	    int_init_val = 1;
	  }

	if (code == BIT_AND_EXPR)
	  int_init_val = -1;

	if (SCALAR_FLOAT_TYPE_P (scalar_type))
	  def_for_init = build_real (scalar_type, real_init_val);
	else
	  def_for_init = build_int_cst (scalar_type, int_init_val);

	/* Create a vector of '0' or '1' except the first element.  */
	elts = XALLOCAVEC (tree, nunits);
	for (i = nunits - 2; i >= 0; --i)
	  elts[i + 1] = def_for_init;

	/* Option1: the first element is '0' or '1' as well.  */
	if (adjustment_def)
	  {
	    elts[0] = def_for_init;
	    init_def = build_vector (vectype, elts);
	    break;
	  }

	/* Option2: the first element is INIT_VAL.  */
	elts[0] = init_val;
	if (TREE_CONSTANT (init_val))
	  init_def = build_vector (vectype, elts);
	else
	  {
	    /* Non-constant INIT_VAL forces a CONSTRUCTOR instead of a
	       VECTOR_CST.  */
	    vec<constructor_elt, va_gc> *v;
	    vec_alloc (v, nunits);
	    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
	    for (i = 1; i < nunits; ++i)
	      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
	    init_def = build_constructor (vectype, v);
	  }

	break;

      case MIN_EXPR:
      case MAX_EXPR:
      case COND_EXPR:
	if (adjustment_def)
	  {
	    /* MIN/MAX/COND never need an epilogue adjustment; splat of
	       INIT_VAL is already correct.  */
	    *adjustment_def = NULL_TREE;
	    if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo) != COND_REDUCTION)
	      {
		init_def = vect_get_vec_def_for_operand (init_val, stmt);
		break;
	      }
	  }
	init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
	if (! gimple_seq_empty_p (stmts))
	  gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
	init_def = build_vector_from_val (vectype, init_val);
	break;

      default:
	gcc_unreachable ();
    }

  return init_def;
}

/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_CODE is the tree-code for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements. The
     first one in this group is STMT.
   INDUCTION_INDEX is the index of the loop for condition reductions.
     Otherwise it is undefined.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  It needs to be smaller than
     any value of the IV in the loop.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
The function also creates a new phi node at the loop exit to preserve loop-closed form, as illustrated below. The flow at the entry to this function: loop: vec_def = phi <null, null> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT s_loop = scalar_stmt # (scalar) STMT loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI use <s_out0> use <s_out0> The above is transformed by this function into: loop: vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT s_loop = scalar_stmt # (scalar) STMT loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out4> use <s_out4> */ static void vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt, int ncopies, enum tree_code reduc_code, vec<gimple *> reduction_phis, int reduc_index, bool double_reduc, slp_tree slp_node, tree induction_index, tree induc_val) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info prev_phi_info; tree vectype; machine_mode mode; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL; basic_block exit_bb; tree scalar_dest; tree scalar_type; gimple *new_phi = NULL, *phi; gimple_stmt_iterator exit_gsi; tree vec_dest; tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest; gimple *epilog_stmt = NULL; enum tree_code code = gimple_assign_rhs_code (stmt); gimple *exit_phi; tree bitsize; tree adjustment_def = NULL; tree vec_initial_def = NULL; tree reduction_op, expr, def, initial_def = NULL; tree orig_name, scalar_result; imm_use_iterator imm_iter, phi_imm_iter; use_operand_p use_p, phi_use_p; gimple *use_stmt, *orig_stmt, *reduction_phi = NULL; bool nested_in_vect_loop = false; auto_vec<gimple *> new_phis; auto_vec<gimple *> inner_phis; enum vect_def_type dt = vect_unknown_def_type; int j, i; auto_vec<tree> 
scalar_results; unsigned int group_size = 1, k, ratio; auto_vec<tree> vec_initial_defs; auto_vec<gimple *> phis; bool slp_reduc = false; tree new_phi_result; gimple *inner_phi = NULL; if (slp_node) group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_in_vect_loop = true; gcc_assert (!slp_node); } reduction_op = get_reduction_op (stmt, reduc_index); vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op)); gcc_assert (vectype); mode = TYPE_MODE (vectype); /* 1. Create the reduction def-use cycle: Set the arguments of REDUCTION_PHIS, i.e., transform loop: vec_def = phi <null, null> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT ... into: loop: vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT ... (in case of SLP, do it for all the phis). */ /* Get the loop-entry arguments. */ enum vect_def_type initial_def_dt = vect_unknown_def_type; if (slp_node) vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs, NULL, slp_node, reduc_index); else { /* Get at the scalar def before the loop, that defines the initial value of the reduction variable. */ gimple *def_stmt = SSA_NAME_DEF_STMT (reduction_op); initial_def = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); /* Optimize: if initial_def is for REDUC_MAX smaller than the base and we can't use zero for induc_val, use initial_def. Similarly for REDUC_MIN and initial_def larger than the base. 
*/ if (TREE_CODE (initial_def) == INTEGER_CST && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) && !integer_zerop (induc_val) && tree_int_cst_lt (initial_def, induc_val)) induc_val = initial_def; vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt); vec_initial_def = get_initial_def_for_reduction (stmt, initial_def, &adjustment_def); vec_initial_defs.create (1); vec_initial_defs.quick_push (vec_initial_def); } /* Set phi nodes arguments. */ FOR_EACH_VEC_ELT (reduction_phis, i, phi) { tree vec_init_def, def; gimple_seq stmts; vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts, true, NULL_TREE); if (stmts) gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); def = vect_defs[i]; for (j = 0; j < ncopies; j++) { if (j != 0) { phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); if (nested_in_vect_loop) vec_init_def = vect_get_vec_def_for_stmt_copy (initial_def_dt, vec_init_def); } /* Set the loop-entry arg of the reduction-phi. */ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) { /* Initialise the reduction phi to zero. This prevents initial values of non-zero interferring with the reduction op. */ gcc_assert (ncopies == 1); gcc_assert (i == 0); tree vec_init_def_type = TREE_TYPE (vec_init_def); tree induc_val_vec = build_vector_from_val (vec_init_def_type, induc_val); add_phi_arg (as_a <gphi *> (phi), induc_val_vec, loop_preheader_edge (loop), UNKNOWN_LOCATION); } else add_phi_arg (as_a <gphi *> (phi), vec_init_def, loop_preheader_edge (loop), UNKNOWN_LOCATION); /* Set the loop-latch arg for the reduction-phi. 
*/ if (j > 0) def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def); add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "transform reduction: created def-use cycle: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0); } } } /* 2. Create epilog code. The reduction epilog code operates across the elements of the vector of partial results computed by the vectorized loop. The reduction epilog code consists of: step 1: compute the scalar result in a vector (v_out2) step 2: extract the scalar result (s_out3) from the vector (v_out2) step 3: adjust the scalar result (s_out3) if needed. Step 1 can be accomplished using one the following three schemes: (scheme 1) using reduc_code, if available. (scheme 2) using whole-vector shifts, if available. (scheme 3) using a scalar loop. In this case steps 1+2 above are combined. The overall epilog code looks like this: s_out0 = phi <s_loop> # original EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> # step 1 s_out3 = extract_field <v_out2, 0> # step 2 s_out4 = adjust_result <s_out3> # step 3 (step 3 is optional, and steps 1 and 2 may be combined). Lastly, the uses of s_out0 are replaced by s_out4. */ /* 2.1 Create new loop-exit-phis to preserve loop-closed form: v_out1 = phi <VECT_DEF> Store them in NEW_PHIS. 
*/ exit_bb = single_exit (loop)->dest; prev_phi_info = NULL; new_phis.create (vect_defs.length ()); FOR_EACH_VEC_ELT (vect_defs, i, def) { for (j = 0; j < ncopies; j++) { tree new_def = copy_ssa_name (def); phi = create_phi_node (new_def, exit_bb); set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo)); if (j == 0) new_phis.quick_push (phi); else { def = vect_get_vec_def_for_stmt_copy (dt, def); STMT_VINFO_RELATED_STMT (prev_phi_info) = phi; } SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def); prev_phi_info = vinfo_for_stmt (phi); } } /* The epilogue is created for the outer-loop, i.e., for the loop being vectorized. Create exit phis for the outer loop. */ if (double_reduc) { loop = outer_loop; exit_bb = single_exit (loop)->dest; inner_phis.create (vect_defs.length ()); FOR_EACH_VEC_ELT (new_phis, i, phi) { tree new_result = copy_ssa_name (PHI_RESULT (phi)); gphi *outer_phi = create_phi_node (new_result, exit_bb); SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, PHI_RESULT (phi)); set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, loop_vinfo)); inner_phis.quick_push (phi); new_phis[i] = outer_phi; prev_phi_info = vinfo_for_stmt (outer_phi); while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi))) { phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); new_result = copy_ssa_name (PHI_RESULT (phi)); outer_phi = create_phi_node (new_result, exit_bb); SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, PHI_RESULT (phi)); set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi; prev_phi_info = vinfo_for_stmt (outer_phi); } } } exit_gsi = gsi_after_labels (exit_bb); /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3 (i.e. when reduc_code is not available) and in the final adjustment code (if needed). Also get the original scalar reduction variable as defined in the loop. In case STMT is a "pattern-stmt" (i.e. 
- it represents a reduction pattern), the tree-code and scalar-def are taken from the original stmt that the pattern-stmt (STMT) replaces. Otherwise (it is a regular reduction) - the tree-code and scalar-def are taken from STMT. */ orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (!orig_stmt) { /* Regular reduction */ orig_stmt = stmt; } else { /* Reduction pattern */ stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)); gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt); } code = gimple_assign_rhs_code (orig_stmt); /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore, partial results are added and not subtracted. */ if (code == MINUS_EXPR) code = PLUS_EXPR; scalar_dest = gimple_assign_lhs (orig_stmt); scalar_type = TREE_TYPE (scalar_dest); scalar_results.create (group_size); new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); bitsize = TYPE_SIZE (scalar_type); /* In case this is a reduction in an inner-loop while vectorizing an outer loop - we don't need to extract a single scalar result at the end of the inner-loop (unless it is double reduction, i.e., the use of reduction is outside the outer-loop). The final vector of partial results will be used in the vectorized outer-loop, or reduced to a scalar result at the end of the outer-loop. */ if (nested_in_vect_loop && !double_reduc) goto vect_finalize_reduction; /* SLP reduction without reduction chain, e.g., # a1 = phi <a2, a0> # b1 = phi <b2, b0> a2 = operation (a1) b2 = operation (b1) */ slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); /* In case of reduction chain, e.g., # a1 = phi <a3, a0> a2 = operation (a1) a3 = operation (a2), we may end up with more than one vector result. Here we reduce them to one vector. 
*/ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { tree first_vect = PHI_RESULT (new_phis[0]); tree tmp; gassign *new_vec_stmt = NULL; vec_dest = vect_create_destination_var (scalar_dest, vectype); for (k = 1; k < new_phis.length (); k++) { gimple *next_phi = new_phis[k]; tree second_vect = PHI_RESULT (next_phi); tmp = build2 (code, vectype, first_vect, second_vect); new_vec_stmt = gimple_build_assign (vec_dest, tmp); first_vect = make_ssa_name (vec_dest, new_vec_stmt); gimple_assign_set_lhs (new_vec_stmt, first_vect); gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT); } new_phi_result = first_vect; if (new_vec_stmt) { new_phis.truncate (0); new_phis.safe_push (new_vec_stmt); } } else new_phi_result = PHI_RESULT (new_phis[0]); if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing various data values where the condition matched and another vector (INDUCTION_INDEX) containing all the indexes of those matches. We need to extract the last matching index (which will be the index with highest value) and use this to index into the data vector. For the case where there were no matches, the data vector will contain all default values and the index vector will be all zeros. */ /* Get various versions of the type of the vector of indexes. */ tree index_vec_type = TREE_TYPE (induction_index); gcc_checking_assert (TYPE_UNSIGNED (index_vec_type)); tree index_scalar_type = TREE_TYPE (index_vec_type); tree index_vec_cmp_type = build_same_sized_truth_vector_type (index_vec_type); /* Get an unsigned integer version of the type of the data vector. 
*/ int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type)); tree scalar_type_unsigned = make_unsigned_type (scalar_precision); tree vectype_unsigned = build_vector_type (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype)); /* First we need to create a vector (ZERO_VEC) of zeros and another vector (MAX_INDEX_VEC) filled with the last matching index, which we can create using a MAX reduction and then expanding. In the case where the loop never made any matches, the max index will be zero. */ /* Vector of {0, 0, 0,...}. */ tree zero_vec = make_ssa_name (vectype); tree zero_vec_rhs = build_zero_cst (vectype); gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs); gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT); /* Find maximum value from the vector of found indexes. */ tree max_index = make_ssa_name (index_scalar_type); gimple *max_index_stmt = gimple_build_assign (max_index, REDUC_MAX_EXPR, induction_index); gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT); /* Vector of {max_index, max_index, max_index,...}. */ tree max_index_vec = make_ssa_name (index_vec_type); tree max_index_vec_rhs = build_vector_from_val (index_vec_type, max_index); gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec, max_index_vec_rhs); gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT); /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes with the vector (INDUCTION_INDEX) of found indexes, choosing values from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC) otherwise. Only one value should match, resulting in a vector (VEC_COND) with one data value and the rest zeros. In the case where the loop never made any matches, every index will match, resulting in a vector with all data values (which will all be the default value). */ /* Compare the max index vector to the vector of found indexes to find the position of the max value. 
*/ tree vec_compare = make_ssa_name (index_vec_cmp_type); gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR, induction_index, max_index_vec); gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT); /* Use the compare to choose either values from the data vector or zero. */ tree vec_cond = make_ssa_name (vectype); gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR, vec_compare, new_phi_result, zero_vec); gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT); /* Finally we need to extract the data value from the vector (VEC_COND) into a scalar (MATCHED_DATA_REDUC). Logically we want to do a OR reduction, but because this doesn't exist, we can use a MAX reduction instead. The data value might be signed or a float so we need to cast it first. In the case where the loop never made any matches, the data values are all identical, and so will reduce down correctly. */ /* Make the matched data values unsigned. */ tree vec_cond_cast = make_ssa_name (vectype_unsigned); tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned, vec_cond); gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast, VIEW_CONVERT_EXPR, vec_cond_cast_rhs); gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT); /* Reduce down to a scalar value. */ tree data_reduc = make_ssa_name (scalar_type_unsigned); optab ot = optab_for_tree_code (REDUC_MAX_EXPR, vectype_unsigned, optab_default); gcc_assert (optab_handler (ot, TYPE_MODE (vectype_unsigned)) != CODE_FOR_nothing); gimple *data_reduc_stmt = gimple_build_assign (data_reduc, REDUC_MAX_EXPR, vec_cond_cast); gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT); /* Convert the reduced value back to the result type and set as the result. 
*/ tree data_reduc_cast = build1 (VIEW_CONVERT_EXPR, scalar_type, data_reduc); epilog_stmt = gimple_build_assign (new_scalar_dest, data_reduc_cast); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); scalar_results.safe_push (new_temp); } /* 2.3 Create the reduction code, using one of the three schemes described above. In SLP we simply need to extract all the elements from the vector (without reducing them), so we use scalar shifts. */ else if (reduc_code != ERROR_MARK && !slp_reduc) { tree tmp; tree vec_elem_type; /*** Case 1: Create: v_out2 = reduc_expr <v_out1> */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using direct vector reduction.\n"); vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result)); if (!useless_type_conversion_p (scalar_type, vec_elem_type)) { tree tmp_dest = vect_create_destination_var (scalar_dest, vec_elem_type); tmp = build1 (reduc_code, vec_elem_type, new_phi_result); epilog_stmt = gimple_build_assign (tmp_dest, tmp); new_temp = make_ssa_name (tmp_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); tmp = build1 (NOP_EXPR, scalar_type, new_temp); } else tmp = build1 (reduc_code, scalar_type, new_phi_result); epilog_stmt = gimple_build_assign (new_scalar_dest, tmp); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) && !operand_equal_p (initial_def, induc_val, 0)) { /* Earlier we set the initial value to be a vector if induc_val values. Check the result and if it is induc_val then replace with the original initial value, unless induc_val is the same as initial_def already. 
*/ tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, induc_val); tmp = make_ssa_name (new_scalar_dest); epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare, initial_def, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); new_temp = tmp; } scalar_results.safe_push (new_temp); } else { bool reduce_with_shift = have_whole_vector_shift (mode); int element_bitsize = tree_to_uhwi (bitsize); int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); tree vec_temp; /* Regardless of whether we have a whole vector shift, if we're emulating the operation via tree-vect-generic, we don't want to use it. Only the first round of the reduction is likely to still be profitable via emulation. */ /* ??? It might be better to emit a reduction tree code here, so that tree-vect-generic can expand the first round via bit tricks. */ if (!VECTOR_MODE_P (mode)) reduce_with_shift = false; else { optab optab = optab_for_tree_code (code, vectype, optab_default); if (optab_handler (optab, mode) == CODE_FOR_nothing) reduce_with_shift = false; } if (reduce_with_shift && !slp_reduc) { int nelements = vec_size_in_bits / element_bitsize; unsigned char *sel = XALLOCAVEC (unsigned char, nelements); int elt_offset; tree zero_vec = build_zero_cst (vectype); /*** Case 2: Create: for (offset = nelements/2; offset >= 1; offset/=2) { Create: va' = vec_shift <va, offset> Create: va = vop <va, va'> } */ tree rhs; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using vector shifts\n"); vec_dest = vect_create_destination_var (scalar_dest, vectype); new_temp = new_phi_result; for (elt_offset = nelements / 2; elt_offset >= 1; elt_offset /= 2) { calc_vec_perm_mask_for_shift (mode, elt_offset, sel); tree mask = vect_gen_perm_mask_any (vectype, sel); epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR, new_temp, zero_vec, mask); new_name = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_name); gsi_insert_before 
(&exit_gsi, epilog_stmt, GSI_SAME_STMT); epilog_stmt = gimple_build_assign (vec_dest, code, new_name, new_temp); new_temp = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } /* 2.4 Extract the final scalar result. Create: s_out3 = extract_field <v_out2, bitpos> */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "extract scalar result\n"); rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitsize_zero_node); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); scalar_results.safe_push (new_temp); } else { /*** Case 3: Create: s = extract_field <v_out2, 0> for (offset = element_size; offset < vector_size; offset += element_size;) { Create: s' = extract_field <v_out2, offset> Create: s = op <s, s'> // For non SLP cases } */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code.\n"); vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); FOR_EACH_VEC_ELT (new_phis, i, new_phi) { int bit_offset; if (gimple_code (new_phi) == GIMPLE_PHI) vec_temp = PHI_RESULT (new_phi); else vec_temp = gimple_assign_lhs (new_phi); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitsize_zero_node); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); /* In SLP we don't need to apply reduction operation, so we just collect s' values in SCALAR_RESULTS. 
*/ if (slp_reduc) scalar_results.safe_push (new_temp); for (bit_offset = element_bitsize; bit_offset < vec_size_in_bits; bit_offset += element_bitsize) { tree bitpos = bitsize_int (bit_offset); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitpos); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_name = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_name); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (slp_reduc) { /* In SLP we don't need to apply reduction operation, so we just collect s' values in SCALAR_RESULTS. */ new_temp = new_name; scalar_results.safe_push (new_name); } else { epilog_stmt = gimple_build_assign (new_scalar_dest, code, new_name, new_temp); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } } } /* The only case where we need to reduce scalar results in SLP, is unrolling. If the size of SCALAR_RESULTS is greater than GROUP_SIZE, we reduce them combining elements modulo GROUP_SIZE. */ if (slp_reduc) { tree res, first_res, new_res; gimple *new_stmt; /* Reduce multiple scalar results in case of SLP unrolling. */ for (j = group_size; scalar_results.iterate (j, &res); j++) { first_res = scalar_results[j % group_size]; new_stmt = gimple_build_assign (new_scalar_dest, code, first_res, res); new_res = make_ssa_name (new_scalar_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_res); gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT); scalar_results[j % group_size] = new_res; } } else /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */ scalar_results.safe_push (new_temp); } } vect_finalize_reduction: if (double_reduc) loop = loop->inner; /* 2.5 Adjust the final result by the initial value of the reduction variable. (When such adjustment is not needed, then 'adjustment_def' is zero). 
For example, if code is PLUS we create: new_temp = loop_exit_def + adjustment_def */ if (adjustment_def) { gcc_assert (!slp_reduc); if (nested_in_vect_loop) { new_phi = new_phis[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); new_dest = vect_create_destination_var (scalar_dest, vectype); } else { new_temp = scalar_results[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); expr = build2 (code, scalar_type, new_temp, adjustment_def); new_dest = vect_create_destination_var (scalar_dest, scalar_type); } epilog_stmt = gimple_build_assign (new_dest, expr); new_temp = make_ssa_name (new_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (nested_in_vect_loop) { set_vinfo_for_stmt (epilog_stmt, new_stmt_vec_info (epilog_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi)); if (!double_reduc) scalar_results.quick_push (new_temp); else scalar_results[0] = new_temp; } else scalar_results[0] = new_temp; new_phis[0] = epilog_stmt; } /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit phis with new adjusted scalar results, i.e., replace use <s_out0> with use <s_out4>. Transform: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out0> use <s_out0> into: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out4> use <s_out4> */ /* In SLP reduction chain we reduce vector results into one vector if necessary, hence we set here GROUP_SIZE to 1. 
SCALAR_DEST is the LHS of the last stmt in the reduction chain, since we are looking for the loop exit phi node. */ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]; /* Handle reduction patterns. */ if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt))) dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)); scalar_dest = gimple_assign_lhs (dest_stmt); group_size = 1; } /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in case that GROUP_SIZE is greater than vectorization factor). Therefore, we need to match SCALAR_RESULTS with corresponding statements. The first (GROUP_SIZE / number of new vector stmts) scalar results correspond to the first vector stmt, etc. (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */ if (group_size > new_phis.length ()) { ratio = group_size / new_phis.length (); gcc_assert (!(group_size % new_phis.length ())); } else ratio = 1; for (k = 0; k < group_size; k++) { if (k % ratio == 0) { epilog_stmt = new_phis[k / ratio]; reduction_phi = reduction_phis[k / ratio]; if (double_reduc) inner_phi = inner_phis[k / ratio]; } if (slp_reduc) { gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k]; orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt)); /* SLP statements can't participate in patterns. */ gcc_assert (!orig_stmt); scalar_dest = gimple_assign_lhs (current_stmt); } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses - one at the latch block, and one at the loop exit). */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))) && !is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); /* While we expect to have found an exit_phi because of loop-closed-ssa form we can end up without one if the scalar cycle is dead. 
*/ FOR_EACH_VEC_ELT (phis, i, exit_phi) { if (outer_loop) { stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); gphi *vect_phi; /* FORNOW. Currently not supporting the case that an inner-loop reduction is not used in the outer-loop (but only outside the outer-loop), unless it is double reduction. */ gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo) && !STMT_VINFO_LIVE_P (exit_phi_vinfo)) || double_reduc); if (double_reduc) STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi; else STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt; if (!double_reduc || STMT_VINFO_DEF_TYPE (exit_phi_vinfo) != vect_double_reduction_def) continue; /* Handle double reduction: stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop) stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop) stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop) stmt4: s2 = phi <s4> - double reduction stmt (outer loop) At that point the regular reduction (stmt2 and stmt3) is already vectorized, as well as the exit phi node, stmt4. Here we vectorize the phi node of double reduction, stmt1, and update all relevant statements. */ /* Go through all the uses of s2 to find double reduction phi node, i.e., stmt1 above. */ orig_name = PHI_RESULT (exit_phi); FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) { stmt_vec_info use_stmt_vinfo; stmt_vec_info new_phi_vinfo; tree vect_phi_init, preheader_arg, vect_phi_res, init_def; basic_block bb = gimple_bb (use_stmt); gimple *use; /* Check that USE_STMT is really double reduction phi node. 
*/ if (gimple_code (use_stmt) != GIMPLE_PHI || gimple_phi_num_args (use_stmt) != 2 || bb->loop_father != outer_loop) continue; use_stmt_vinfo = vinfo_for_stmt (use_stmt); if (!use_stmt_vinfo || STMT_VINFO_DEF_TYPE (use_stmt_vinfo) != vect_double_reduction_def) continue; /* Create vector phi node for double reduction: vs1 = phi <vs0, vs2> vs1 was created previously in this function by a call to vect_get_vec_def_for_operand and is stored in vec_initial_def; vs2 is defined by INNER_PHI, the vectorized EXIT_PHI; vs0 is created here. */ /* Create vector phi node. */ vect_phi = create_phi_node (vec_initial_def, bb); new_phi_vinfo = new_stmt_vec_info (vect_phi, loop_vec_info_for_loop (outer_loop)); set_vinfo_for_stmt (vect_phi, new_phi_vinfo); /* Create vs0 - initial def of the double reduction phi. */ preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt, loop_preheader_edge (outer_loop)); init_def = get_initial_def_for_reduction (stmt, preheader_arg, NULL); vect_phi_init = vect_init_vector (use_stmt, init_def, vectype, NULL); /* Update phi node arguments with vs0 and vs2. */ add_phi_arg (vect_phi, vect_phi_init, loop_preheader_edge (outer_loop), UNKNOWN_LOCATION); add_phi_arg (vect_phi, PHI_RESULT (inner_phi), loop_latch_edge (outer_loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "created double reduction phi node: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0); } vect_phi_res = PHI_RESULT (vect_phi); /* Replace the use, i.e., set the correct vs1 in the regular reduction phi node. FORNOW, NCOPIES is always 1, so the loop is redundant. 
*/ use = reduction_phi; for (j = 0; j < ncopies; j++) { edge pr_edge = loop_preheader_edge (loop); SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res); use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use)); } } } } phis.release (); if (nested_in_vect_loop) { if (double_reduc) loop = outer_loop; else continue; } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses, one at the latch block, and one at the loop exit). For double reductions we are looking for exit phis of the outer loop. */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) { if (!is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); } else { if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI) { tree phi_res = PHI_RESULT (USE_STMT (use_p)); FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (phi_use_p))) && !is_gimple_debug (USE_STMT (phi_use_p))) phis.safe_push (USE_STMT (phi_use_p)); } } } } FOR_EACH_VEC_ELT (phis, i, exit_phi) { /* Replace the uses: */ orig_name = PHI_RESULT (exit_phi); scalar_result = scalar_results[k]; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, scalar_result); } phis.release (); } } /* Function is_nonwrapping_integer_induction. Check if STMT (which is part of loop LOOP) both increments and does not cause overflow. */ static bool is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo); tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo); tree lhs_type = TREE_TYPE (gimple_phi_result (stmt)); widest_int ni, max_loop_value, lhs_max; bool overflow = false; /* Make sure the loop is integer based. 
*/ if (TREE_CODE (base) != INTEGER_CST || TREE_CODE (step) != INTEGER_CST) return false; /* Check that the induction increments. */ if (tree_int_cst_sgn (step) == -1) return false; /* Check that the max size of the loop will not wrap. */ if (TYPE_OVERFLOW_UNDEFINED (lhs_type)) return true; if (! max_stmt_executions (loop, &ni)) return false; max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type), &overflow); if (overflow) return false; max_loop_value = wi::add (wi::to_widest (base), max_loop_value, TYPE_SIGN (lhs_type), &overflow); if (overflow) return false; return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type)) <= TYPE_PRECISION (lhs_type)); } /* Function vectorizable_reduction. Check if STMT performs a reduction operation that can be vectorized. If VEC_STMT is also passed, vectorize the STMT: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. Return FALSE if not a vectorizable STMT, TRUE otherwise. This function also handles reduction idioms (patterns) that have been recognized in advance during vect_pattern_recog. In this case, STMT may be of this form: X = pattern_expr (arg0, arg1, ..., X) and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original sequence that had been detected and replaced by the pattern-stmt (STMT). This function also handles reduction of condition expressions, for example: for (int i = 0; i < N; i++) if (a[i] < value) last = a[i]; This is handled by vectorising the loop and creating an additional vector containing the loop indexes for which "a[i] < value" was true. In the function epilogue this is reduced to a single max value and then used to index into the vector of results. In some cases of reduction patterns, the type of the reduction variable X is different than the type of the other arguments of STMT. 
In such cases, the vectype that is used when transforming STMT into a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. For example, consider an accumulation of shorts into an int accumulator. On some targets it's possible to vectorize this pattern operating on 8 shorts at a time (hence, the vectype for purposes of determining the vectorization factor should be V8HI); on the other hand, the vectype that is used to create the vector form is actually V4SI (the type of the result). Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that indicates what is the actual level of parallelism (V8HI in the example), so that the right vectorization factor would be derived. This vectype corresponds to the type of arguments to the reduction stmt, and should *NOT* be used to create the vectorized stmt. The right vectype for the vectorized stmt is obtained from the type of the result X: get_vectype_for_scalar_type (TREE_TYPE (X)) This means that, contrary to "regular" reductions (or "regular" stmts in general), the following equation: STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X)) does *NOT* necessarily hold for reduction patterns. 
*/ bool vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node) { tree vec_dest; tree scalar_dest; tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); tree vectype_in = NULL_TREE; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum tree_code code, orig_code, epilog_reduc_code; machine_mode vec_mode; int op_type; optab optab, reduc_optab; tree new_temp = NULL_TREE; gimple *def_stmt; enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type; gphi *new_phi = NULL; gimple *cond_reduc_def_stmt = NULL; tree scalar_type; bool is_simple_use; gimple *orig_stmt; stmt_vec_info orig_stmt_info; tree expr = NULL_TREE; int i; int ncopies; int epilog_copies; stmt_vec_info prev_stmt_info, prev_phi_info; bool single_defuse_cycle = false; tree reduc_def = NULL_TREE; gimple *new_stmt = NULL; int j; tree ops[3]; bool nested_cycle = false, found_nested_cycle_def = false; gimple *reduc_def_stmt = NULL; bool double_reduc = false, dummy; basic_block def_bb; struct loop * def_stmt_loop, *outer_loop = NULL; tree def_arg; gimple *def_arg_stmt; auto_vec<tree> vec_oprnds0; auto_vec<tree> vec_oprnds1; auto_vec<tree> vect_defs; auto_vec<gimple *> phis; int vec_num; tree def0, def1, tem, op1 = NULL_TREE; bool first_p = true; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; /* In case of reduction chain we switch to the first stmt in the chain, but we don't update STMT_INFO, since only the last stmt is marked as reduction and has reduction properties. */ if (GROUP_FIRST_ELEMENT (stmt_info) && GROUP_FIRST_ELEMENT (stmt_info) != stmt) { stmt = GROUP_FIRST_ELEMENT (stmt_info); first_p = false; } if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_cycle = true; } /* 1. 
Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer && !GROUP_FIRST_ELEMENT (stmt_info)) return false; /* Reductions that are not used even in an enclosing outer-loop, are expected to be "live" (used out of the loop). */ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle) return false; /* 2. Has this been recognized as a reduction pattern? Check if STMT represents a pattern that has been recognized in earlier analysis stages. For stmts that represent a pattern, the STMT_VINFO_RELATED_STMT field records the last stmt in the original sequence that constitutes the pattern. */ orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); if (orig_stmt) { orig_stmt_info = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info)); gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info)); } /* 3. Check the operands of the operation. The first operands are defined inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ gcc_assert (is_gimple_assign (stmt)); /* Flatten RHS. 
*/ switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) { case GIMPLE_SINGLE_RHS: op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)); if (op_type == ternary_op) { tree rhs = gimple_assign_rhs1 (stmt); ops[0] = TREE_OPERAND (rhs, 0); ops[1] = TREE_OPERAND (rhs, 1); ops[2] = TREE_OPERAND (rhs, 2); code = TREE_CODE (rhs); } else return false; break; case GIMPLE_BINARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == binary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); break; case GIMPLE_TERNARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == ternary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); ops[2] = gimple_assign_rhs3 (stmt); break; case GIMPLE_UNARY_RHS: return false; default: gcc_unreachable (); } /* The default is that the reduction variable is the last in statement. */ int reduc_index = op_type - 1; if (code == MINUS_EXPR) reduc_index = 0; if (code == COND_EXPR && slp_node) return false; scalar_dest = gimple_assign_lhs (stmt); scalar_type = TREE_TYPE (scalar_dest); if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type) && !SCALAR_FLOAT_TYPE_P (scalar_type)) return false; /* Do not try to vectorize bit-precision reductions. */ if ((TYPE_PRECISION (scalar_type) != GET_MODE_PRECISION (TYPE_MODE (scalar_type)))) return false; /* All uses but the last are expected to be defined in the loop. The last use is the reduction variable. In case of nested cycle this assumption is not true: we use reduc_index to record the index of the reduction variable. */ for (i = 0; i < op_type; i++) { if (i == reduc_index) continue; /* The condition of COND_EXPR is checked in vectorizable_condition(). 
*/ if (i == 0 && code == COND_EXPR) continue; is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &dt, &tem); if (!vectype_in) vectype_in = tem; gcc_assert (is_simple_use); if (dt != vect_internal_def && dt != vect_external_def && dt != vect_constant_def && dt != vect_induction_def && !(dt == vect_nested_cycle && nested_cycle)) return false; if (dt == vect_nested_cycle) { found_nested_cycle_def = true; reduc_def_stmt = def_stmt; reduc_index = i; } if (i == 1 && code == COND_EXPR) { /* Record how value of COND_EXPR is defined. */ if (dt == vect_constant_def) { cond_reduc_dt = dt; cond_reduc_val = ops[i]; } if (dt == vect_induction_def && def_stmt != NULL && is_nonwrapping_integer_induction (def_stmt, loop)) { cond_reduc_dt = dt; cond_reduc_def_stmt = def_stmt; } } } is_simple_use = vect_is_simple_use (ops[reduc_index], loop_vinfo, &def_stmt, &dt, &tem); if (!vectype_in) vectype_in = tem; gcc_assert (is_simple_use); if (!found_nested_cycle_def) reduc_def_stmt = def_stmt; if (reduc_def_stmt && gimple_code (reduc_def_stmt) != GIMPLE_PHI) return false; if (!(dt == vect_reduction_def || dt == vect_nested_cycle || ((dt == vect_internal_def || dt == vect_external_def || dt == vect_constant_def || dt == vect_induction_def) && nested_cycle && found_nested_cycle_def))) { /* For pattern recognized stmts, orig_stmt might be a reduction, but some helper statements for the pattern might not, or might be COND_EXPRs with reduction uses in the condition. */ gcc_assert (orig_stmt); return false; } enum vect_reduction_type v_reduc_type; gimple *tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt, !nested_cycle, &dummy, false, &v_reduc_type); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type; /* If we have a condition reduction, see if we can simplify it further. 
*/ if (v_reduc_type == COND_REDUCTION) { if (cond_reduc_dt == vect_induction_def) { stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo); gcc_assert (TREE_CODE (base) == INTEGER_CST); cond_reduc_val = NULL_TREE; /* Find a suitable value below base; punt if base is the minimum value of the type for now. */ if (tree_int_cst_sgn (base) == 1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)), base)) cond_reduc_val = int_const_binop (MINUS_EXPR, base, integer_one_node); if (cond_reduc_val) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "integer induction.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = INTEGER_INDUC_COND_REDUCTION; } } /* Loop peeling modifies initial value of reduction PHI, which makes the reduction stmt to be transformed different to the original stmt analyzed. We need to record reduction code for CONST_COND_REDUCTION type reduction at analyzing stage, thus it can be used directly at transform stage. */ if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR) { /* Also set the reduction type to CONST_COND_REDUCTION. 
*/ gcc_assert (cond_reduc_dt == vect_constant_def); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } else if (cond_reduc_dt == vect_constant_def) { enum vect_def_type cond_initial_dt; gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]); tree cond_initial_val = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); gcc_assert (cond_reduc_val != NULL_TREE); vect_is_simple_use (cond_initial_val, loop_vinfo, &def_stmt, &cond_initial_dt); if (cond_initial_dt == vect_constant_def && types_compatible_p (TREE_TYPE (cond_initial_val), TREE_TYPE (cond_reduc_val))) { tree e = fold_build2 (LE_EXPR, boolean_type_node, cond_initial_val, cond_reduc_val); if (e && (integer_onep (e) || integer_zerop (e))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "compile time constant.\n"); /* Record reduction code at analysis stage. */ STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) = integer_onep (e) ? MAX_EXPR : MIN_EXPR; STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } } } } if (orig_stmt) gcc_assert (tmp == orig_stmt || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt); else /* We changed STMT to be the first stmt in reduction chain, hence we check that in this case the first element in the chain is STMT. */ gcc_assert (stmt == tmp || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt))) return false; if (slp_node) ncopies = 1; else ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo) / TYPE_VECTOR_SUBPARTS (vectype_in)); gcc_assert (ncopies >= 1); vec_mode = TYPE_MODE (vectype_in); if (code == COND_EXPR) { /* Only call during the analysis stage, otherwise we'll lose STMT_VINFO_TYPE. */ if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported condition in reduction\n"); return false; } } else { /* 4. 
Supportable by target? */ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR || code == RROTATE_EXPR) { /* Shifts and rotates are only supported by vectorizable_shifts, not vectorizable_reduction. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported shift or rotation.\n"); return false; } /* 4.1. check support for the operation in the loop */ optab = optab_for_tree_code (code, vectype_in, optab_default); if (!optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab.\n"); return false; } if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf (MSG_NOTE, "op not supported by target.\n"); if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD || LOOP_VINFO_VECT_FACTOR (loop_vinfo) < vect_min_worthwhile_factor (code)) return false; if (dump_enabled_p ()) dump_printf (MSG_NOTE, "proceeding using word mode.\n"); } /* Worthwhile without SIMD support? */ if (!VECTOR_MODE_P (TYPE_MODE (vectype_in)) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) < vect_min_worthwhile_factor (code)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not worthwhile without SIMD support.\n"); return false; } } /* 4.2. Check support for the epilog operation. If STMT represents a reduction pattern, then the type of the reduction variable may be different than the type of the rest of the arguments. For example, consider the case of accumulation of shorts into an int accumulator; The original code: S1: int_a = (int) short_a; orig_stmt-> S2: int_acc = plus <int_a ,int_acc>; was replaced with: STMT: int_acc = widen_sum <short_a, int_acc> This means that: 1. The tree-code that is used to create the vector operation in the epilog code (that reduces the partial results) is not the tree-code of STMT, but is rather the tree-code of the original stmt from the pattern that STMT is replacing. 
I.e, in the example above we want to use 'widen_sum' in the loop, but 'plus' in the epilog. 2. The type (mode) we use to check available target support for the vector operation to be created in the *epilog*, is determined by the type of the reduction variable (in the example above we'd check this: optab_handler (plus_optab, vect_int_mode])). However the type (mode) we use to check available target support for the vector operation to be created *inside the loop*, is determined by the type of the other arguments to STMT (in the example we'd check this: optab_handler (widen_sum_optab, vect_short_mode)). This is contrary to "regular" reductions, in which the types of all the arguments are the same as the type of the reduction variable. For "regular" reductions we can therefore use the same vector type (and also the same tree-code) when generating the epilog code and when generating the code inside the loop. */ if (orig_stmt) { /* This is a reduction pattern: get the vectype from the type of the reduction variable, and get the tree-code from orig_stmt. */ gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION); orig_code = gimple_assign_rhs_code (orig_stmt); gcc_assert (vectype_out); vec_mode = TYPE_MODE (vectype_out); } else { /* Regular reduction: use the same vectype and tree-code as used for the vector code inside the loop can be used for the epilog code. */ orig_code = code; if (code == MINUS_EXPR) orig_code = PLUS_EXPR; /* For simple condition reductions, replace with the actual expression we want to base our reduction around. 
*/ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION) { orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info); gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR); } else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) orig_code = MAX_EXPR; } if (nested_cycle) { def_bb = gimple_bb (reduc_def_stmt); def_stmt_loop = def_bb->loop_father; def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, loop_preheader_edge (def_stmt_loop)); if (TREE_CODE (def_arg) == SSA_NAME && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg)) && gimple_code (def_arg_stmt) == GIMPLE_PHI && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt)) && vinfo_for_stmt (def_arg_stmt) && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt)) == vect_double_reduction_def) double_reduc = true; } epilog_reduc_code = ERROR_MARK; if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != COND_REDUCTION) { if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code)) { reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out, optab_default); if (!reduc_optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab for reduction.\n"); epilog_reduc_code = ERROR_MARK; } else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc op not supported by target.\n"); epilog_reduc_code = ERROR_MARK; } /* When epilog_reduc_code is ERROR_MARK then a reduction will be generated in the epilog using multiple expressions. This does not work for condition reductions. 
*/ if (epilog_reduc_code == ERROR_MARK && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } else { if (!nested_cycle || double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } } else { int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type)); cr_index_scalar_type = make_unsigned_type (scalar_precision); cr_index_vector_type = build_vector_type (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out)); epilog_reduc_code = REDUC_MAX_EXPR; optab = optab_for_tree_code (REDUC_MAX_EXPR, cr_index_vector_type, optab_default); if (optab_handler (optab, TYPE_MODE (cr_index_vector_type)) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc max op not supported by target.\n"); return false; } } if ((double_reduc || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != TREE_CODE_REDUCTION) && ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in double reduction or condition " "reduction.\n"); return false; } /* In case of widenning multiplication by a constant, we update the type of the constant to be the type of the other operand. We check that the constant fits the type in the pattern recognition pass. 
*/ if (code == DOT_PROD_EXPR && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1]))) { if (TREE_CODE (ops[0]) == INTEGER_CST) ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]); else if (TREE_CODE (ops[1]) == INTEGER_CST) ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]); else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "invalid types in dot-prod\n"); return false; } } if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { widest_int ni; if (! max_loop_iterations (loop, &ni)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop count not known, cannot create cond " "reduction.\n"); return false; } /* Convert backedges to iterations. */ ni += 1; /* The additional index will be the same type as the condition. Check that the loop can fit into this less one (because we'll use up the zero slot for when there are no matches). */ tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type); if (wi::geu_p (ni, wi::to_widest (max_index))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop size is greater than data size.\n"); return false; } } if (!vec_stmt) /* transformation not required. */ { if (first_p && !vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies, reduc_index)) return false; STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } /** Transform. **/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n"); /* FORNOW: Multiple types are not supported for condition. */ if (code == COND_EXPR) gcc_assert (ncopies == 1); /* Create the destination vector */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. 
*/ /* If the reduction is used in an outer loop we need to generate VF intermediate results, like so (e.g. for ncopies=2): r0 = phi (init, r0) r1 = phi (init, r1) r0 = x0 + r0; r1 = x1 + r1; (i.e. we generate VF results in 2 registers). In this case we have a separate def-use cycle for each copy, and therefore for each copy we get the vector def for the reduction variable from the respective phi node created for this copy. Otherwise (the reduction is unused in the loop nest), we can combine together intermediate results, like so (e.g. for ncopies=2): r = phi (init, r) r = x0 + r; r = x1 + r; (i.e. we generate VF/2 results in a single register). In this case for each copy we get the vector def for the reduction variable from the vectorized reduction operation generated in the previous iteration. */ if (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live) { single_defuse_cycle = true; epilog_copies = 1; } else epilog_copies = ncopies; prev_stmt_info = NULL; prev_phi_info = NULL; if (slp_node) vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); else { vec_num = 1; vec_oprnds0.create (1); if (op_type == ternary_op) vec_oprnds1.create (1); } phis.create (vec_num); vect_defs.create (vec_num); if (!slp_node) vect_defs.quick_push (NULL_TREE); for (j = 0; j < ncopies; j++) { if (j == 0 || !single_defuse_cycle) { for (i = 0; i < vec_num; i++) { /* Create the reduction-phi that defines the reduction operand. */ new_phi = create_phi_node (vec_dest, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); if (j == 0 || slp_node) phis.quick_push (new_phi); } } if (code == COND_EXPR) { gcc_assert (!slp_node); vectorizable_condition (stmt, gsi, vec_stmt, PHI_RESULT (phis[0]), reduc_index, NULL); /* Multiple types are not supported for condition. */ break; } /* Handle uses. */ if (j == 0) { if (slp_node) { /* Get vec defs for all the operands except the reduction index, ensuring the ordering of the ops in the vector is kept. 
*/ auto_vec<tree, 3> slp_ops; auto_vec<vec<tree>, 3> vec_defs; slp_ops.quick_push (reduc_index == 0 ? NULL : ops[0]); slp_ops.quick_push (reduc_index == 1 ? NULL : ops[1]); if (op_type == ternary_op) slp_ops.quick_push (reduc_index == 2 ? NULL : ops[2]); vect_get_slp_defs (slp_ops, slp_node, &vec_defs, -1); vec_oprnds0.safe_splice (vec_defs[reduc_index == 0 ? 1 : 0]); vec_defs[reduc_index == 0 ? 1 : 0].release (); if (op_type == ternary_op) { vec_oprnds1.safe_splice (vec_defs[reduc_index == 2 ? 1 : 2]); vec_defs[reduc_index == 2 ? 1 : 2].release (); } } else { loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index], stmt); vec_oprnds0.quick_push (loop_vec_def0); if (op_type == ternary_op) { op1 = reduc_index == 0 ? ops[2] : ops[1]; loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt); vec_oprnds1.quick_push (loop_vec_def1); } } } else { if (!slp_node) { enum vect_def_type dt; gimple *dummy_stmt; vect_is_simple_use (ops[!reduc_index], loop_vinfo, &dummy_stmt, &dt); loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0); vec_oprnds0[0] = loop_vec_def0; if (op_type == ternary_op) { vect_is_simple_use (op1, loop_vinfo, &dummy_stmt, &dt); loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1); vec_oprnds1[0] = loop_vec_def1; } } if (single_defuse_cycle) reduc_def = gimple_assign_lhs (new_stmt); STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; } FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { if (slp_node) reduc_def = PHI_RESULT (phis[i]); else { if (!single_defuse_cycle || j == 0) reduc_def = PHI_RESULT (new_phi); } def1 = ((op_type == ternary_op) ? 
vec_oprnds1[i] : NULL); if (op_type == binary_op) { if (reduc_index == 0) expr = build2 (code, vectype_out, reduc_def, def0); else expr = build2 (code, vectype_out, def0, reduc_def); } else { if (reduc_index == 0) expr = build3 (code, vectype_out, reduc_def, def0, def1); else { if (reduc_index == 1) expr = build3 (code, vectype_out, def0, reduc_def, def1); else expr = build3 (code, vectype_out, def0, def1, reduc_def); } } new_stmt = gimple_build_assign (vec_dest, expr); new_temp = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_temp); vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) { SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); vect_defs.quick_push (new_temp); } else vect_defs[0] = new_temp; } if (slp_node) continue; if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); prev_phi_info = vinfo_for_stmt (new_phi); } tree indx_before_incr, indx_after_incr, cond_name = NULL; /* Finalize the reduction-phi (set its arguments) and create the epilog reduction code. */ if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node) { new_temp = gimple_assign_lhs (*vec_stmt); vect_defs[0] = new_temp; /* For cond reductions we want to create a new vector (INDEX_COND_EXPR) which is updated with the current index of the loop for every match of the original loop's cond_expr (VEC_STMT). This results in a vector containing the last time the condition passed for that vector lane. The first match will be a 1 to allow 0 to be used for non-matching indexes. If there are no matches at all then the vector will be all zeroes. 
*/ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { int nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); int k; gcc_assert (gimple_assign_rhs_code (*vec_stmt) == VEC_COND_EXPR); /* First we create a simple vector induction variable which starts with the values {1,2,3,...} (SERIES_VECT) and increments by the vector size (STEP). */ /* Create a {1,2,3,...} vector. */ tree *vtemp = XALLOCAVEC (tree, nunits_out); for (k = 0; k < nunits_out; ++k) vtemp[k] = build_int_cst (cr_index_scalar_type, k + 1); tree series_vect = build_vector (cr_index_vector_type, vtemp); /* Create a vector of the step value. */ tree step = build_int_cst (cr_index_scalar_type, nunits_out); tree vec_step = build_vector_from_val (cr_index_vector_type, step); /* Create an induction variable. */ gimple_stmt_iterator incr_gsi; bool insert_after; standard_iv_increment_position (loop, &incr_gsi, &insert_after); create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr); /* Next create a new phi node vector (NEW_PHI_TREE) which starts filled with zeros (VEC_ZERO). */ /* Create a vector of 0s. */ tree zero = build_zero_cst (cr_index_scalar_type); tree vec_zero = build_vector_from_val (cr_index_vector_type, zero); /* Create a vector phi node. */ tree new_phi_tree = make_ssa_name (cr_index_vector_type); new_phi = create_phi_node (new_phi_tree, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); add_phi_arg (new_phi, vec_zero, loop_preheader_edge (loop), UNKNOWN_LOCATION); /* Now take the condition from the loops original cond_expr (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for every match uses values from the induction variable (INDEX_BEFORE_INCR) otherwise uses values from the phi node (NEW_PHI_TREE). Finally, we update the phi (NEW_PHI_TREE) to take the value of the new cond_expr (INDEX_COND_EXPR). */ /* Duplicate the condition from vec_stmt. 
*/
	  tree ccompare = unshare_expr (gimple_assign_rhs1 (*vec_stmt));

	  /* Create a conditional, where the condition is taken from vec_stmt
	     (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
	     else is the phi (NEW_PHI_TREE).  */
	  tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
					 ccompare, indx_before_incr,
					 new_phi_tree);
	  cond_name = make_ssa_name (cr_index_vector_type);
	  gimple *index_condition = gimple_build_assign (cond_name,
							 index_cond_expr);
	  gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);

	  /* The index vector has its own type (CR_INDEX_VECTOR_TYPE),
	     distinct from VECTYPE_OUT, so record it in a fresh
	     stmt_vec_info for the new statement.  */
	  stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
							    loop_vinfo);
	  STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
	  set_vinfo_for_stmt (index_condition, index_vec_info);

	  /* Update the phi with the vec cond.  */
	  add_phi_arg (new_phi, cond_name, loop_latch_edge (loop),
		       UNKNOWN_LOCATION);
	}
    }

  /* Generate the epilog code that reduces the vector of partial results
     to the final scalar result, and finish off the reduction phis.  */
  vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
				    epilog_reduc_code, phis, reduc_index,
				    double_reduc, slp_node, cond_name,
				    cond_reduc_val);

  return true;
}

/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      /* Any other operation: treat as never worthwhile with generic
	 (non-SIMD) vectors.  */
      return INT_MAX;
    }
}

/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.
*/
bool
vectorizable_induction (gimple *phi,
			gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  /* Number of vector stmts needed to cover the vectorization factor.  */
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);

  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple *exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types in nested loop.\n");
	  return false;
	}

      /* Look for a non-debug use of the latch value of the inner-loop phi
	 that lies outside the inner loop, i.e. an inner-loop exit phi.  */
      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
	  if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
		&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "inner-loop induction only used outside "
				 "of the outer vectorized loop.\n");
	      return false;
	    }
	}
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);

  return true;
}

/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple *stmt,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  /* Reductions are handled by the reduction epilog code, not here.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW.  CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant.  Leaving in "
			 "place.\n");
      return true;
    }

  if (!vec_stmt)
    /* No transformation required.  */
    return true;

  /* If stmt has a related stmt, then use that for getting the lhs.
*/
  if (is_pattern_stmt_p (stmt_info))
    stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
			       : gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  /* Size of one vector lane, and of the whole vector, in bits.  */
  bitsize = TYPE_SIZE (TREE_TYPE (vectype));
  vec_bitsize = TYPE_SIZE (vectype);

  /* Get the vectorized lhs of STMT and the lane to use (counted in bits).  */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
	 all the slp vectors.  Calculate which slp vector it is and the index
	 within.  */
      int pos = (num_vec * nunits) - num_scalar + slp_index;
      int vec_entry = pos / nunits;
      int vec_index = pos % nunits;

      /* Get the correct slp vectorized stmt.  */
      vec_lhs = gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[vec_entry]);

      /* Get entry to use.  */
      bitstart = build_int_cst (unsigned_type_node, vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);

      /* For multiple copies, get the last copy.  */
      for (int i = 1; i < ncopies; ++i)
	vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
						  vec_lhs);

      /* Get the last lane in the vector.  */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }

  /* Create a new vectorized stmt for the uses of STMT and insert outside the
     loop.  */
  gimple_seq stmts = NULL;
  /* BIT_FIELD_REF cannot extract from a boolean vector directly; use an
     integer type of the same bit width instead.  */
  tree bftype = TREE_TYPE (vectype);
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
  tree new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
  new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts,
				   true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result.  If the use stmt is a
     single arg PHI, just replace all uses of PHI result.  It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt.  */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
	&& !is_gimple_debug (use_stmt))
      {
	if (gimple_code (use_stmt) == GIMPLE_PHI
	    && gimple_phi_num_args (use_stmt) == 1)
	  {
	    replace_uses_by (gimple_phi_result (use_stmt), new_tree);
	  }
	else
	  {
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_tree);
	  }
	update_stmt (use_stmt);
      }

  return true;
}

/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple *ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb;

	  if (!is_gimple_debug (ustmt))
	    continue;

	  bb = gimple_bb (ustmt);

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (gimple_debug_bind_p (ustmt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
                                     "killing debug use\n");

		  /* Reset the bound value rather than removing the debug
		     stmt, so debug info stays consistent.  */
		  gimple_debug_bind_reset_value (ustmt);
		  update_stmt (ustmt);
		}
	      else
		gcc_unreachable ();
	    }
	}
    }
}

/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* Constant case.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);

      gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
      /* NITERSM1 + 1 did not wrap iff NITERSM1 < NITERS when compared
	 as widest_ints.  */
      if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
	return true;
    }

  widest_int max;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Check the upper bound of loop niters.  NITERS fits without overflow
     iff the loop's maximum iteration count is strictly below the maximum
     value of NITERS' type.  */
  if (get_max_loop_iterations (loop, &max))
    {
      tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
      signop sgn = TYPE_SIGN (type);
      widest_int type_max = widest_int::from (wi::max_value (type), sgn);
      if (max < type_max)
	return true;
    }
  return false;
}

/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  gcov_type freq_h = loop->header->count, freq_e = preheader->count;

  /* Use frequency only if counts are zero.  */
  if (freq_h == 0 && freq_e == 0)
    {
      freq_h = loop->header->frequency;
      freq_e = EDGE_FREQUENCY (preheader);
    }
  if (freq_h != 0)
    {
      gcov_type scale;

      /* Avoid dropping loop body profile counter to 0 because of zero count
	 in loop's preheader.  */
      freq_e = MAX (freq_e, 1);
      /* This should not overflow.  */
      scale = GCOV_COMPUTE_SCALE (freq_e * (new_est_niter + 1), freq_h);
      scale_loop_frequencies (loop, scale, REG_BR_PROB_BASE);
    }

  basic_block exit_bb = single_pred (loop->latch);
  edge exit_e = single_exit (loop);
  exit_e->count = loop_preheader_edge (loop)->count;
  exit_e->probability = REG_BR_PROB_BASE / (new_est_niter + 1);

  edge exit_l = single_pred_edge (loop->latch);
  int prob = exit_l->probability;
  exit_l->probability = REG_BR_PROB_BASE - exit_e->probability;
  exit_l->count = exit_bb->count - exit_e->count;
  /* Clamp: the subtraction above can go negative when the counts are
     inconsistent.  */
  if (exit_l->count < 0)
    exit_l->count = 0;
  if (prob > 0)
    scale_bbs_frequencies_int (&loop->latch, 1, exit_l->probability, prob);
}

/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - created vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns scalar epilogue loop if any.
*/ struct loop * vect_transform_loop (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); struct loop *epilogue = NULL; basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; tree niters_vector = NULL; int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); bool grouped_store; bool slp_scheduled = false; gimple *stmt, *pattern_stmt; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool transform_pattern_stmt = false; bool check_profitability = false; int th; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n"); /* Use the more conservative vectorization threshold. If the number of iterations is constant assume the cost check has been performed by our caller. If the threshold makes all loops profitable that run at least the vectorization factor number of times checking is pointless, too. */ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Profitability threshold is %d loop iterations.\n", th); check_profitability = true; } /* Make sure there exists a single-predecessor exit bb. Do this before versioning. */ edge e = single_exit (loop); if (! single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge\n"); } /* Version the loop first, if required, so the profitability check comes first. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) { vect_loop_versioning (loop_vinfo, th, check_profitability); check_profitability = false; } /* Make sure there exists a single-predecessor exit bb also on the scalar loop copy. Do this after versioning but before peeling so CFG structure is fine for both scalar and if-converted loop to make slpeel_duplicate_current_defs_from_edges face matched loop closed PHI nodes on the exit. 
*/ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) { e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)); if (! single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge of scalar loop\n"); } } tree niters = vect_build_loop_niters (loop_vinfo); LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters; tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo)); bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo); epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, th, check_profitability, niters_no_overflow); if (niters_vector == NULL_TREE) { if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) niters_vector = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)), LOOP_VINFO_INT_NITERS (loop_vinfo) / vf); else vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector, niters_no_overflow); } /* 1) Make sure the loop header has exactly two entries 2) Make sure we have a preheader basic block. */ gcc_assert (EDGE_COUNT (loop->header->preds) == 2); split_edge (loop_preheader_edge (loop)); /* FORNOW: the vectorizer supports only loops which body consist of one basic block (header + empty latch). When the vectorizer will support more involved loop forms, the order by which the BBs are traversed need to be reconsidered. 
*/ for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; stmt_vec_info stmt_info; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } stmt_info = vinfo_for_stmt (phi); if (!stmt_info) continue; if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, phi); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) continue; if (STMT_VINFO_VECTYPE (stmt_info) && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)) != (unsigned HOST_WIDE_INT) vf) && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n"); vect_transform_stmt (phi, NULL, NULL, NULL, NULL); } } pattern_stmt = NULL; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;) { bool is_store; if (transform_pattern_stmt) stmt = pattern_stmt; else { stmt = gsi_stmt (si); /* During vectorization remove existing clobber stmts. */ if (gimple_clobber_p (stmt)) { unlink_stmt_vdef (stmt); gsi_remove (&si, true); release_defs (stmt); continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } stmt_info = vinfo_for_stmt (stmt); /* vector stmts created in the outer-loop during vectorization of stmts in an inner-loop may not have a stmt_info, and do not need to be vectorized. 
*/ if (!stmt_info) { gsi_next (&si); continue; } if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, stmt); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (stmt); } else { gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) transform_pattern_stmt = true; /* If pattern statement has def stmts, vectorize them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> vectorizing pattern def " "stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); transform_pattern_stmt = false; } } else transform_pattern_stmt = false; } if (STMT_VINFO_VECTYPE (stmt_info)) { unsigned int nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); if (!STMT_SLP_TYPE 
(stmt_info) && nunits != (unsigned int) vf && dump_enabled_p ()) /* For SLP VF is set according to unrolling factor, and not to vector size, hence for SLP this print is not valid. */ dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); } /* SLP. Schedule all the SLP instances when the first SLP stmt is reached. */ if (STMT_SLP_TYPE (stmt_info)) { if (!slp_scheduled) { slp_scheduled = true; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== scheduling SLP instances ===\n"); vect_schedule_slp (loop_vinfo); } /* Hybrid SLP stmts must be vectorized in addition to SLP. */ if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info)) { if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } /* -------- vectorize statement ------------ */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n"); grouped_store = false; is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL); if (is_store) { if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) { /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in the chain. */ gsi_next (&si); vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info)); } else { /* Free the attached stmt_vec_info and remove the stmt. */ gimple *store = gsi_stmt (si); free_stmt_vec_info (store); unlink_stmt_vdef (store); gsi_remove (&si, true); release_defs (store); } /* Stores can only appear at the end of pattern statements. */ gcc_assert (!transform_pattern_stmt); pattern_def_seq = NULL; } else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } /* stmts in BB */ } /* BBs in loop */ slpeel_make_loop_iterate_ntimes (loop, niters_vector); scale_profile_for_vect_loop (loop, vf); /* The minimum number of iterations performed by the epilogue. This is 1 when peeling for gaps because we always need a final scalar iteration. 
*/ int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0; /* +1 to convert latch counts to loop iteration counts, -min_epilogue_iters to remove iterations that cannot be performed by the vector code. */ int bias = 1 - min_epilogue_iters; /* In these calculations the "- 1" converts loop iteration counts back to latch counts. */ if (loop->any_upper_bound) loop->nb_iterations_upper_bound = wi::udiv_floor (loop->nb_iterations_upper_bound + bias, vf) - 1; if (loop->any_likely_upper_bound) loop->nb_iterations_likely_upper_bound = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias, vf) - 1; if (loop->any_estimate) loop->nb_iterations_estimate = wi::udiv_floor (loop->nb_iterations_estimate + bias, vf) - 1; if (dump_enabled_p ()) { if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { dump_printf_loc (MSG_NOTE, vect_location, "LOOP VECTORIZED\n"); if (loop->inner) dump_printf_loc (MSG_NOTE, vect_location, "OUTER LOOP VECTORIZED\n"); dump_printf (MSG_NOTE, "\n"); } else dump_printf_loc (MSG_NOTE, vect_location, "LOOP EPILOGUE VECTORIZED (VS=%d)\n", current_vector_size); } /* Free SLP instances here because otherwise stmt reference counting won't work. */ slp_instance instance; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Clear-up safelen field since its value is invalid after vectorization since vectorized loop can have loop-carried dependencies. */ loop->safelen = 0; /* Don't vectorize epilogue for epilogue. 
*/ if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) epilogue = NULL; if (epilogue) { unsigned int vector_sizes = targetm.vectorize.autovectorize_vector_sizes (); vector_sizes &= current_vector_size - 1; if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)) epilogue = NULL; else if (!vector_sizes) epilogue = NULL; else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0) { int smallest_vec_size = 1 << ctz_hwi (vector_sizes); int ratio = current_vector_size / smallest_vec_size; int eiters = LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); eiters = eiters % vf; epilogue->nb_iterations_upper_bound = eiters - 1; if (eiters < vf / ratio) epilogue = NULL; } } if (epilogue) { epilogue->force_vectorize = loop->force_vectorize; epilogue->safelen = loop->safelen; epilogue->dont_vectorize = false; /* We may need to if-convert epilogue to vectorize it. */ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) tree_if_conversion (epilogue); } return epilogue; } /* The code below is trying to perform simple optimization - revert if-conversion for masked stores, i.e. if the mask of a store is zero do not perform it and all stored value producers also if possible. 
For example,
     for (i=0; i<n; i++)
       if (c[i])
	 {
	   p1[i] += 1;
	   p2[i] = p3[i] +2;
	 }
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/

/* For each masked store in LOOP, sink the store and (when safe) the
   statements that only produce its stored value into a new basic block
   STORE_BB that is guarded by "mask != {0,...}", so the whole group is
   skipped at runtime when the mask is all-zero.  The virtual operand
   chain is re-wired via a new PHI in the join block.  */

void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	    worklist.safe_push (stmt);
	}
    }

  free (bbs);
  if (worklist.is_empty ())
    return;

  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
	 the same loop as if_bb.  It could be different to LOOP when two
	 level loop-nest is vectorized and mask_store belongs to the inner
	 one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      /* BB falls through to JOIN_BB on the "mask == 0" (skip) side and
	 branches to STORE_BB otherwise.  */
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = PROB_UNLIKELY;
      store_bb->frequency = PROB_ALWAYS - EDGE_FREQUENCY (efalse);
      make_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Create new block %d to sink mask stores.",
			 store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
	 .MEM_2 = VDEF <.MEM_1>
	 will be converted to
	 .MEM.3 = VDEF <.MEM_1>
	 and new PHI node will be created in join bb
	 .MEM_2 = PHI <.MEM_1, .MEM_3>  */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);

      /* Put all masked stores with the same mask to STORE_BB if
	 possible.  Worklist order means they are popped innermost-last
	 first, so GSI walks backwards from LAST.  */
      while (true)
	{
	  gimple_stmt_iterator gsi_from;
	  gimple *stmt1 = NULL;

	  /* Move masked store to STORE_BB.  */
	  last_store = last;
	  gsi = gsi_for_stmt (last);
	  gsi_from = gsi;
	  /* Shift GSI to the previous stmt for further traversal.  */
	  gsi_prev (&gsi);
	  gsi_to = gsi_start_bb (store_bb);
	  gsi_move_before (&gsi_from, &gsi_to);
	  /* Setup GSI_TO to the non-empty block start.  */
	  gsi_to = gsi_start_bb (store_bb);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Move stmt to created bb\n");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
	    }
	  /* Move all stored value producers if possible.  Walk upwards
	     through BB; any statement that fails a safety test stops the
	     scan (a break below).  */
	  while (!gsi_end_p (gsi))
	    {
	      tree lhs;
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      bool res;

	      /* Skip debug statements.  */
	      if (is_gimple_debug (gsi_stmt (gsi)))
		{
		  gsi_prev (&gsi);
		  continue;
		}
	      stmt1 = gsi_stmt (gsi);
	      /* Do not consider statements writing to memory or having
		 volatile operand.  */
	      if (gimple_vdef (stmt1)
		  || gimple_has_volatile_ops (stmt1))
		break;
	      gsi_from = gsi;
	      gsi_prev (&gsi);
	      lhs = gimple_get_lhs (stmt1);
	      if (!lhs)
		break;

	      /* LHS of vectorized stmt must be SSA_NAME.  */
	      if (TREE_CODE (lhs) != SSA_NAME)
		break;

	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  /* Remove dead scalar statement.  */
		  if (has_zero_uses (lhs))
		    {
		      gsi_remove (&gsi_from, true);
		      continue;
		    }
		}

	      /* Check that LHS does not have uses outside of STORE_BB.  */
	      res = true;
	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
		{
		  gimple *use_stmt;
		  use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (gimple_bb (use_stmt) != store_bb)
		    {
		      res = false;
		      break;
		    }
		}
	      if (!res)
		break;

	      if (gimple_vuse (stmt1)
		  && gimple_vuse (stmt1) != gimple_vuse (last_store))
		break;

	      /* Can move STMT1 to STORE_BB.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Move stmt to created bb\n");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
		}
	      gsi_move_before (&gsi_from, &gsi_to);
	      /* Shift GSI_TO for further insertion.  */
	      gsi_prev (&gsi_to);
	    }
	  /* Put other masked stores with the same mask to STORE_BB.
	     Stop when the next store on the worklist has a different
	     mask, or is not the statement the upward scan ended on.  */
	  if (worklist.is_empty ()
	      || gimple_call_arg (worklist.last (), 2) != mask
	      || worklist.last () != stmt1)
	    break;
	  last = worklist.pop ();
	}
      /* The skip edge E supplies the old memory state to the join PHI.  */
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}
UniOP.h
#ifndef UNIOP_H_
#define UNIOP_H_

/*
 * UniOP.h:
 * a simple feed forward neural operation, unary input.
 *
 * Created on: Apr 22, 2017
 * Author: mszhang
 */

#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
#include <cstdlib>
#include "profiler.h"

// Trainable parameters shared by the Uni*/Linear* nodes below:
// weight matrix W (nOSize x nISize) and an optional bias column b.
class UniParams {
  public:
    Param W;        // weight matrix
    Param b;        // bias column vector; only valid when bUseB is true
    bool bUseB;     // whether the bias term is used

  public:
    UniParams() {
        bUseB = true;
    }

    // Register the parameters with the optimizer (bias only if enabled).
    inline void exportAdaParams(ModelUpdate& ada) {
        ada.addParam(&W);
        if (bUseB) {
            ada.addParam(&b);
        }
    }

    // Allocate and initialize W (nOSize x nISize) and, optionally, b (nOSize x 1).
    inline void initial(int nOSize, int nISize, bool useB = true) {
        W.initial(nOSize, nISize);
        bUseB = useB;
        if (bUseB) {
            b.initial(nOSize, 1);
        }
    }

    // Serialize bUseB, W and (if enabled) b to the stream.
    inline void save(std::ofstream &os) const {
        os << bUseB << std::endl;
        W.save(os);
        if (bUseB) {
            b.save(os);
        }
    }

    // Load in the same order save() writes.
    inline void load(std::ifstream &is) {
        is >> bUseB;
        W.load(is);
        if (bUseB) {
            b.load(is);
        }
    }
};

// non-linear feed-forward node: val = activate(W * in + b)
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by
// x(0, i), x(1, i), ..., x(n, i)
class UniNode : public Node {
  public:
    PNode in;                                       // single input node (set by forward(), cleared by clearValue())
    UniParams* param;                               // shared parameters; not owned
    dtype(*activate)(const dtype&);                 // activation function (defaults to ftanh)
    dtype(*derivate)(const dtype&, const dtype&);   // derivation function of activation function (defaults to dtanh)
    Tensor1D ty, lty;                               // ty: pre-activation cache; lty: its gradient

  public:
    UniNode() : Node() {
        in = NULL;
        activate = ftanh;
        derivate = dtanh;
        param = NULL;
        node_type = "uni";
    }

    ~UniNode() {
        in = NULL;
    }

    inline void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
        ty.init(ndim);
        lty.init(ndim);
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        in = NULL;
    }

    // define the activate function and its derivation form
    inline void setFunctions(dtype(*f)(const dtype&),
            dtype(*f_deri)(const dtype&, const dtype&)) {
        activate = f;
        derivate = f_deri;
    }

    // Hook this node to input x and register it in the computation graph.
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

    // val = activate(W * in->val [+ b]); caches pre-activation in ty for backward().
    // NOTE(review): ptr_fun was removed in C++17; presumably supplied by MyLib
    // or a pre-C++17 build — verify.
    inline void compute() {
        ty.mat() = param->W.val.mat() * in->val.mat();
        if (param->bUseB) {
            ty.vec() += param->b.val.vec();
        }
        val.vec() = ty.vec().unaryExpr(ptr_fun(activate));
    }

    // Accumulate gradients into W.grad, b.grad and the input's loss.
    inline void backward() {
        lty.vec() = loss.vec() * ty.vec().binaryExpr(val.vec(), ptr_fun(derivate));
        param->W.grad.mat() += lty.mat() * in->val.tmat();
        if (param->bUseB) {
            param->b.grad.vec() += lty.vec();
        }
        in->loss.mat() += param->W.val.mat().transpose() * lty.mat();
    }

    inline PExecute generate(bool bTrain, dtype cur_drop_factor);   // better to rewrite for deep understanding

    // Nodes are batchable together only with same param and same activation pair.
    bool typeEqual(PNode other) override {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        UniNode* conv_other = (UniNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        if (activate != conv_other->activate || derivate != conv_other->derivate) {
            return false;
        }
        return true;
    }

    // Hash consistent with typeEqual: mixes base hash, param and both function pointers.
    size_t typeHashCode() const override {
        void *act = reinterpret_cast<void*>(activate);
        void *de = reinterpret_cast<void*>(derivate);
        return Node::typeHashCode() ^ ::typeHashCode(param) ^
            ::typeHashCode(act) ^ (::typeHashCode(de) << 1);
    }

#if USE_GPU
    void toNodeInfo(NodeInfo &info) const override {
        Node::toNodeInfo(info);
        info.input_vals.push_back(in->val.value);
        info.input_losses.push_back(in->loss.value);
    }
#endif
};

// linear + bias feed-forward node (no activation): val = W * in [+ b]
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by
// x(0, i), x(1, i), ..., x(n, i)
class LinearUniNode : public Node {
  public:
    PNode in;           // single input node
    UniParams* param;   // shared parameters; not owned

  public:
    LinearUniNode() : Node() {
        in = NULL;
        param = NULL;
        node_type = "linear_uni";
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        in = NULL;
    }

  public:
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

  public:
    // val = W * in->val [+ b]; no non-linearity, so nothing to cache.
    inline void compute() {
        val.mat() = param->W.val.mat() * in->val.mat();
        if (param->bUseB) {
            val.vec() += param->b.val.vec();
        }
    }

    inline void backward() {
        param->W.grad.mat() += loss.mat() * in->val.tmat();
        if (param->bUseB) {
            param->b.grad.vec() += loss.vec();
        }
        in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);   // better to rewrite for deep understanding

    // NOTE(review): unlike UniNode/LinearNode this is not marked override — confirm
    // Node::typeEqual is virtual with this exact signature.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        LinearUniNode* conv_other = (LinearUniNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }
};

// pure linear feed-forward node (no bias, no activation): val = W * in
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by
// x(0, i), x(1, i), ..., x(n, i)
class LinearNode : public Node {
  public:
    PNode in;           // single input node
    UniParams* param;   // shared parameters; only W is used here, not b

  public:
    LinearNode() : Node() {
        in = NULL;
        param = NULL;
        node_type = "linear";
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        in = NULL;
    }

  public:
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

  public:
    inline void compute() {
        val.mat() = param->W.val.mat() * in->val.mat();
    }

    inline void backward() {
        param->W.grad.mat() += loss.mat() * in->val.tmat();
        in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
    }

  public:
    PExecute generate(bool bTrain, dtype cur_drop_factor);   // better to rewrite for deep understanding

    bool typeEqual(PNode other) override {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        LinearNode* conv_other = (LinearNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }

    size_t typeHashCode() const override {
        return Node::typeHashCode() ^ ::typeHashCode(param);
    }

#if USE_GPU
    void toNodeInfo(NodeInfo &info) const override {
        Node::toNodeInfo(info);
        info.input_vals.push_back(in->val.value);
        info.input_losses.push_back(in->loss.value);
    }
#endif
};

// Batched executor for UniNode: gathers the batch's inputs into column-major
// matrices, runs one big GEMM + activation, and scatters results back.
class UniExecute :public Execute {
  public:
    Tensor2D x, ty, y, b;   // x: inputs, ty: pre-activation, y: activated, b: broadcast bias
    int inDim, outDim;      // copied from param->W by UniNode::generate
    UniParams* param;
    dtype(*activate)(const dtype&);                 // activation function
    dtype(*derivate)(const dtype&, const dtype&);   // derivation function of activation function
    Tensor2D drop_mask;     // per-element dropout mask (GPU path)

    inline void forward() {
        int count = batch.size();
        ty.init(outDim, count);
        x.init(inDim, count);
        y.init(outDim, count);
        drop_mask.init(outDim, count);
#if TEST_CUDA || !USE_GPU
        b.init(outDim, count);
#endif
#if USE_GPU
        std::vector<dtype*> xs, ys;
        xs.reserve(batch.size());
        ys.reserve(batch.size());

        // Collect device pointers of each node's input value and output slot.
        for (int i = 0; i < batch.size(); ++i) {
            UniNode *n = static_cast<UniNode*>(batch.at(i));
            xs.push_back(n->in->val.value);
            ys.push_back(n->val.value);
        }

        n3ldg_cuda::CopyForUniNodeForward(xs, param->b.val.value,
                x.value, ty.value, count, inDim, outDim, param->bUseB);
        n3ldg_cuda::MatrixMultiplyMatrix(param->W.val.value, x.value,
                ty.value, outDim, inDim, count, param->bUseB);
        CalculateDropMask(count, outDim, drop_mask);
        n3ldg_cuda::ActivatedEnum activatedEnum = ToActivatedEnum(activate);
        n3ldg_cuda::Activated(activatedEnum, ty.value, ys, y.value,
                outDim, bTrain, dynamicDropValue(), drop_mask.value);
        // NOTE(review): this loop has no effect (n is never used) — looks like
        // leftover debug code; candidate for removal.
        for (int i = 0; i<batch.size(); ++i) {
            UniNode *n = static_cast<UniNode*>(batch.at(i));
        }
#if TEST_CUDA
        // CPU reference computation to cross-check the CUDA results.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                x[idy][idx] = ptr->in->val[idy];
            }
            if (param->bUseB) {
                for (int idy = 0; idy < outDim; idy++) {
                    b[idy][idx] = param->b.val.v[idy];
                }
            }
        }
        n3ldg_cuda::Assert(x.verify("forward x"));

        ty.mat() = param->W.val.mat() * x.mat();
        if (param->bUseB) {
            ty.vec() = ty.vec() + b.vec();
        }

        y.vec() = ty.vec().unaryExpr(ptr_fun(activate));
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < outDim; idy++) {
                ptr->val[idy] = y[idy][idx];
            }
        }

        // Mirror the device-generated dropout mask onto the host nodes.
        drop_mask.copyFromDeviceToHost();
        for (int i = 0; i < count; ++i) {
            for (int j = 0; j < outDim; ++j) {
                dtype v = drop_mask[j][i];
                batch[i]->drop_mask[j] = v <= dynamicDropValue() ? 0 : 1;
            }
        }

        for (int i = 0; i < count; ++i) {
            batch[i]->forward_drop(bTrain, drop_factor);
            n3ldg_cuda::Assert(batch[i]->val.verify("forward batch i val"));
        }
        n3ldg_cuda::Assert(ty.verify("forward ty"));
        n3ldg_cuda::Assert(y.verify("forward y"));
#endif
#else
        // CPU path: gather inputs (and broadcast bias) into matrices.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                x[idy][idx] = ptr->in->val[idy];
            }
            if (param->bUseB) {
                for (int idy = 0; idy < outDim; idy++) {
                    b[idy][idx] = param->b.val.v[idy];
                }
            }
        }

        ty.mat() = param->W.val.mat() * x.mat();

        if (param->bUseB) {
            ty.vec() = ty.vec() + b.vec();
        }

        y.vec() = ty.vec().unaryExpr(ptr_fun(activate));

        // Scatter activated columns back to each node.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < outDim; idy++) {
                ptr->val[idy] = y[idy][idx];
            }
        }

        for (int i = 0; i < count; ++i) {
            // NOTE(review): drop_value is assigned but never used — dead local.
            dtype drop_value = batch[0]->drop_value;
            batch[i]->forward_drop(bTrain, drop_factor);
        }
#endif
    }

    void backward() {
        int count = batch.size();
        Tensor2D lx, lty, ly;   // lx: input grads, lty: pre-activation grads, ly: output grads
#if USE_GPU
        lx.init(inDim, count);
        lty.init(outDim, count);
        ly.init(outDim, count);
        std::vector<dtype*> ly_vec;
        ly_vec.reserve(count);
        for (int i = 0; i < count; ++i) {
            UniNode* ptr = (UniNode*)batch[i];
            ly_vec.push_back(ptr->loss.value);
        }
        n3ldg_cuda::ActivatedEnum activatedEnum = ToActivatedEnum(activate);
        n3ldg_cuda::CalculateLtyForUniBackward(activatedEnum, ly_vec, ty.value,
                y.value, drop_mask.value, dynamicDropValue(), lty.value, count,
                outDim);
#if TEST_CUDA
        n3ldg_cuda::Assert(param->W.grad.verify(
                    "uni backward W grad initial"));
#endif
        // W.grad += lty * x^T  (transA=true per kernel flags — TODO confirm flag order)
        n3ldg_cuda::MatrixMultiplyMatrix(lty.value, x.value,
                param->W.grad.value, outDim, count, inDim, true, true, false);
#if TEST_CUDA
        n3ldg_cuda::Assert(param->W.val.verify("uni W.val initial"));
#endif
        // lx = W^T * lty
        n3ldg_cuda::MatrixMultiplyMatrix(param->W.val.value, lty.value,
                lx.value, inDim, outDim, count, false, false, true);
        std::vector<dtype*> losses;
        losses.reserve(count);
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            losses.push_back(ptr->in->loss.value);
        }
#if TEST_CUDA
        n3ldg_cuda::Assert(
                param->b.grad.verify("uni backward param b initial"));
#endif
        n3ldg_cuda::AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(
                lty.value, lx.value, param->b.grad.value, losses, count,
                outDim, inDim, param->bUseB);
#if TEST_CUDA
        // CPU reference computation to cross-check the CUDA results.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            ptr->backward_drop();
            for (int idy = 0; idy < outDim; idy++) {
                ly[idy][idx] = ptr->loss[idy];
            }
        }
        n3ldg_cuda::Assert(x.verify("backward x"));
        lty.vec() = ly.vec() * ty.vec().binaryExpr(y.vec(), ptr_fun(derivate));
        n3ldg_cuda::Assert(lty.verify("backward lty"));
        param->W.grad.mat() += lty.mat() * x.mat().transpose();
        n3ldg_cuda::Assert(param->W.grad.verify("backward W grad"));
        if (param->bUseB) {
            for (int idx = 0; idx < count; idx++) {
                for (int idy = 0; idy < outDim; idy++) {
                    param->b.grad.v[idy] += lty[idy][idx];
                }
            }
        }
        n3ldg_cuda::Assert(param->b.grad.verify("backward b grad"));
        lx.mat() += param->W.val.mat().transpose() * lty.mat();
        n3ldg_cuda::Assert(lx.verify("backward lx"));
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                ptr->in->loss[idy] += lx[idy][idx];
            }
        }
        for (Node * n : batch) {
            UniNode *ptr = static_cast<UniNode *>(n);
            n3ldg_cuda::Assert(ptr->in->loss.verify("uni backward loss"));
        }
#endif
#else
        lx.init(inDim, count);
        lty.init(outDim, count);
        ly.init(outDim, count);
        // Gather per-node output losses (after dropout backward) into ly.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            ptr->backward_drop();
            for (int idy = 0; idy < outDim; idy++) {
                ly[idy][idx] = ptr->loss[idy];
            }
        }

        // Chain rule through the activation: lty = ly * f'(ty, y).
        lty.vec() = ly.vec() * ty.vec().binaryExpr(y.vec(), ptr_fun(derivate));

        param->W.grad.mat() += lty.mat() * x.mat().transpose();

        if (param->bUseB) {
            for (int idx = 0; idx < count; idx++) {
                for (int idy = 0; idy < outDim; idy++) {
                    param->b.grad.v[idy] += lty[idy][idx];
                }
            }
        }

        lx.mat() += param->W.val.mat().transpose() * lty.mat();

        // Scatter input gradients back to each input node.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                ptr->in->loss[idy] += lx[idy][idx];
            }
        }
#endif
    }
};

// Build a single-node executor carrying this node's dims, params and activation.
inline PExecute UniNode::generate(bool bTrain, dtype cur_drop_factor) {
    UniExecute* exec = new UniExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    exec->inDim = param->W.inDim();
    exec->outDim = param->W.outDim();
    exec->param = param;
    exec->activate = activate;
    exec->derivate = derivate;
    return exec;
};

// Executor for LinearUniNode: no batching of the math — each node computes
// individually (compute()/backward() per node).
class LinearUniExecute :public Execute {
  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};

inline PExecute LinearUniNode::generate(bool bTrain, dtype cur_drop_factor) {
    LinearUniExecute* exec = new LinearUniExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
};

#if USE_GPU
// GPU batched executor for LinearNode: one GEMM for the whole batch.
class LinearExecute :public Execute {
  public:
    Tensor2D x, y, b;
    int inDim, outDim, count;
    UniParams* param;

    void forward() {
        int count = batch.size();   // NOTE(review): shadows the member 'count'
        x.init(inDim, count);
        y.init(outDim, count);
#if TEST_CUDA
        b.init(outDim, count);
#endif
        std::vector<dtype*> xs, ys;
        xs.reserve(batch.size());
        ys.reserve(batch.size());
        for (int i = 0; i < batch.size(); ++i) {
            LinearNode *n = static_cast<LinearNode*>(batch.at(i));
            xs.push_back(n->in->val.value);
            ys.push_back(n->val.value);
        }

        n3ldg_cuda::CopyForUniNodeForward(xs, param->b.val.value,
                x.value, y.value, count, inDim, outDim, param->bUseB);
        n3ldg_cuda::MatrixMultiplyMatrix(param->W.val.value, x.value, y.value,
                outDim, inDim, count, false);
        std::vector<dtype*> vals;
        vals.reserve(count);
        for (Node *node : batch) {
            vals.push_back(node->val.value);
        }
        n3ldg_cuda::CopyFromOneVectorToMultiVals(y.value, vals, count, outDim);
#if TEST_CUDA
        // CPU reference computation to cross-check the CUDA results.
        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                x[idy][idx] = ptr->in->val[idy];
            }
        }
        y.mat() = param->W.val.mat() * x.mat();
        n3ldg_cuda::Assert(x.verify("forward x"));
        n3ldg_cuda::Assert(y.verify("forward y"));

        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            for (int idy = 0; idy < outDim; idy++) {
                ptr->val[idy] = y[idy][idx];
            }
            n3ldg_cuda::Assert(ptr->val.verify("linear forward val"));
        }
#endif
    }

    void backward() {
        int count = batch.size();
        Tensor2D lx, ly;
        lx.init(inDim, count);
        ly.init(outDim, count);
        std::vector<dtype*> ly_vec;
        ly_vec.reserve(count);
        // NOTE(review): batch holds LinearNode*, but is cast to UniNode* here
        // and below — works only if the accessed members share layout; verify.
        for (int i = 0; i < count; ++i) {
            UniNode* ptr = (UniNode*)batch[i];
            ly_vec.push_back(ptr->loss.value);
        }
        n3ldg_cuda::CalculateLyForLinearBackward(ly_vec, ly.value, count,
                outDim);
        n3ldg_cuda::MatrixMultiplyMatrix(ly.value, x.value,
                param->W.grad.value, outDim, count, inDim, true, true, false);
        n3ldg_cuda::MatrixMultiplyMatrix(param->W.val.value, ly.value,
                lx.value, inDim, outDim, count, false, false, true);
        std::vector<dtype*> losses;
        losses.reserve(count);
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            losses.push_back(ptr->in->loss.value);
        }
        n3ldg_cuda::AddLtyToParamBiasAndAddLxToInputLossesForUniBackward(
                ly.value, lx.value, param->b.grad.value, losses, count, outDim,
                inDim, param->bUseB);
#if TEST_CUDA
        // CPU reference computation to cross-check the CUDA results.
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            ptr->backward_drop();
            for (int idy = 0; idy < outDim; idy++) {
                ly[idy][idx] = ptr->loss[idy];
            }
        }
        assert(x.verify("backward x"));
        param->W.grad.mat() += ly.mat() * x.mat().transpose();
        param->W.grad.verify("backward W grad");
        if (param->bUseB) {
            for (int idx = 0; idx < count; idx++) {
                for (int idy = 0; idy < outDim; idy++) {
                    param->b.grad.v[idy] += ly[idy][idx];
                }
            }
        }
        n3ldg_cuda::Assert(param->b.grad.verify("backward b grad"));
        lx.mat() += param->W.val.mat().transpose() * ly.mat();
        lx.verify("backward lx");
        for (int idx = 0; idx < count; idx++) {
            UniNode* ptr = (UniNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                ptr->in->loss[idy] += lx[idy][idx];
            }
        }
        for (Node * n : batch) {
            UniNode *ptr = static_cast<UniNode *>(n);
            n3ldg_cuda::Assert(ptr->in->loss.verify("backward loss"));
        }
#endif
    }
};
#else
// CPU batched executor for LinearNode: gather -> one GEMM -> scatter.
class LinearExecute :public Execute {
  public:
    Tensor2D x, y;
    int inDim, outDim, count;   // count is set in forward() and reused in backward()
    UniParams* param;

    inline void forward() {
        count = batch.size();
        x.init(inDim, count);
        y.init(outDim, count);

        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                x[idy][idx] = ptr->in->val[idy];
            }
        }

        y.mat() = param->W.val.mat() * x.mat();

        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            for (int idy = 0; idy < outDim; idy++) {
                ptr->val[idy] = y[idy][idx];
            }
            ptr->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        Tensor2D lx, ly;
        lx.init(inDim, count);
        ly.init(outDim, count);
        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            ptr->backward_drop();
            for (int idy = 0; idy < outDim; idy++) {
                ly[idy][idx] = ptr->loss[idy];
            }
        }

        param->W.grad.mat() += ly.mat() * x.mat().transpose();

        lx.mat() += param->W.val.mat().transpose() * ly.mat();

        for (int idx = 0; idx < count; idx++) {
            LinearNode* ptr = (LinearNode*)batch[idx];
            for (int idy = 0; idy < inDim; idy++) {
                ptr->in->loss[idy] += lx[idy][idx];
            }
        }
    }
};
#endif

// NOTE(review): unlike the other generate() methods, drop_factor is never set
// here — exec keeps whatever Execute's default is; confirm this is intended.
inline PExecute LinearNode::generate(bool bTrain, dtype cur_drop_factor) {
    LinearExecute* exec = new LinearExecute();
    exec->batch.push_back(this);
    exec->inDim = param->W.inDim();
    exec->outDim = param->W.outDim();
    exec->param = param;
    exec->bTrain = bTrain;
    return exec;
};

#endif /* UNIOP_H_ */
conv_dw_k5_k7_kernel_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #ifndef __CONV_DW_K5_K7_KERNEL_ARM_H_ #define __CONV_DW_K5_K7_KERNEL_ARM_H_ #include <stdio.h> #include <arm_neon.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> void dw_k5s1(float*, float*, float*, float*, int, int, int); static float elem_activation(float tmp, int type) { if (type == 0) { if (tmp < 0.0f) tmp = 0; if (type > 0) tmp = tmp < type ? 
tmp : type; } return tmp; } static float32x4_t vector_activation(float32x4_t tmp, int type) { if (type == 0) { float32x4_t zero = vdupq_n_f32(0.0); tmp = vmaxq_f32(tmp, zero); if (type > 0) { float32x4_t max = vdupq_n_f32(( float )type); tmp = vminq_f32(tmp, max); } } return tmp; } void depthwise_conv_k5s1(float* input, float* weight, float* bias, float* output, int input_h, int input_w, int channel, int output_h, int output_w, int pad0, int pad1, int activation, int num_thread) { int input_h_pad = input_h + pad0 + pad1; int input_w_pad = input_w + pad0 + pad1; int no_pad = pad0 == 0 && pad1 == 0; if (!no_pad) // have pad { // #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { /* pad */ float* input_buf = ( float* )malloc(sizeof(float) * input_h_pad * input_w_pad + 128); float* input_tmp = input_buf; float* input_c = input + c * input_h * input_w; memset(input_tmp, 0, sizeof(float) * (input_w_pad * pad0 + pad0)); input_tmp += input_w_pad * pad0 + pad0; for (int h = 0; h < input_h; h++) { memcpy(input_tmp, input_c + h * input_w, sizeof(float) * input_w); input_tmp += input_w; memset(input_tmp, 0, sizeof(float) * (pad0 + pad1)); input_tmp += pad0 + pad1; } memset(input_tmp, 0, sizeof(float) * (input_w_pad * pad1 - pad0)); /* process convdw5x5s1 */ float* weight_cur = weight + c * 25; float* output_cur = output + c * output_h * output_w; if (bias) dw_k5s1(input_buf, weight_cur, bias + c, output_cur, output_h, output_w, activation); else dw_k5s1(input_buf, weight_cur, NULL, output_cur, output_h, output_w, activation); /* free input temp buffer */ free(input_buf); } } else { // #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float* input_cur = input + c * input_h * input_w; float* weight_cur = weight + c * 25; // kernel_w * kernel_h is 5 * 5 = 25 float* output_cur = output + c * output_h * output_w; dw_k5s1(input_cur, weight_cur, bias + c, output_cur, output_h, output_w, activation); } } } void 
/* Depthwise 5x5 stride-2 convolution (NEON), one independent plane per channel.
 *
 * input_buf  : [channel, input_h, input_w] source feature map
 * weight_buf : [channel, 25] 5x5 kernels, row-major (index = kh * 5 + kw)
 * bias       : per-channel bias, may be NULL
 * output_buf : [channel, output_h, output_w] destination
 * activation : see elem_activation / vector_activation semantics
 * num_thread : currently unused — the omp pragma below is commented out
 *
 * Border taps that would fall outside the input are simply skipped, which
 * corresponds to an implicit zero padding of 2 on every side (the first
 * output uses only weights 12,13,14 / 17..19 / 22..24 over input rows 0..2,
 * i.e. the kernel centered at (-2,-2)+... is clipped). The caller is
 * presumably expected to pass output_h/output_w consistent with pad=2,
 * stride=2 — TODO confirm against the dispatch site.
 *
 * Layout of the work per channel:
 *   - first output row (top border), then mid rows, then last row;
 *   - within each row: left-border pixel, NEON-vectorized interior
 *     (4 outputs per iteration, mid_w_block = mid_w rounded down to x4),
 *     scalar tail, right-border pixel;
 *   - h_remain / w_remain (input size parity) select how many kernel rows /
 *     columns are still inside the input at the bottom / right border.
 *
 * NEON interior trick: three overlapping 4-lane loads cover 12 consecutive
 * input floats; vuzpq_f32 splits them into even/odd columns (the stride-2
 * phases) and vextq_f32 builds the remaining shifted column vectors, so the
 * five kernel columns of one row cost five multiply-accumulates. */
depthwise_conv_k5s2(float* input_buf, float* weight_buf, float* bias, float* output_buf, int input_h, int input_w,
                    int channel, int output_h, int output_w, int activation, int num_thread)
{
    int input_hw = input_h * input_w;
    int output_hw = output_h * output_w;
    int h_remain = input_h & 0x1;   /* input height parity: taps left at bottom border */
    int w_remain = input_w & 0x1;   /* input width parity: taps left at right border */
    int mid_h = output_h - 2;       /* interior output rows (excl. first and last) */
    int mid_w = output_w - 2;       /* interior output cols (excl. first and last) */
    int mid_w_block = mid_w & -4;   /* interior cols handled 4-at-a-time by NEON */
    // #pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        int w, h;
        float* input_buf_c = input_buf + c * input_hw;
        float* output_buf_c = output_buf + c * output_hw;
        float* weight_buf_c = weight_buf + c * 25;
        float bias_c = bias ? bias[c] : 0;
        /* ---- top-left corner: only kernel rows 2..4 / cols 2..4 overlap ---- */
        float tmp = bias_c;
        tmp += weight_buf_c[12] * input_buf_c[0];
        tmp += weight_buf_c[13] * input_buf_c[1];
        tmp += weight_buf_c[14] * input_buf_c[2];
        tmp += weight_buf_c[17] * input_buf_c[input_w];
        tmp += weight_buf_c[18] * input_buf_c[input_w + 1];
        tmp += weight_buf_c[19] * input_buf_c[input_w + 2];
        tmp += weight_buf_c[22] * input_buf_c[input_w * 2];
        tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 1];
        tmp += weight_buf_c[24] * input_buf_c[input_w * 2 + 2];
        output_buf_c[0] = elem_activation(tmp, activation);
        /* ---- top row, vectorized interior (kernel rows 2..4 on input rows 0..2) ---- */
        for (w = 0; w < mid_w_block; w += 4)
        {
            float32x4_t sum0 = vdupq_n_f32(bias_c);
            float32x4_t line2_0 = vld1q_f32(input_buf_c + 2 * w);
            float32x4_t line2_1 = vld1q_f32(input_buf_c + 2 * w + 4);
            float32x4_t line2_2 = vld1q_f32(input_buf_c + 2 * w + 8);
            float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1);   /* even/odd columns */
            float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2);
            float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1);
            float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]);
            float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w + 2 * w);
            float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w + 2 * w + 4);
            float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w + 2 * w + 8);
            float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1);
            float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2);
            float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1);
            float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]);
            float32x4_t line4_0 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w);
            float32x4_t line4_1 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w + 4);
            float32x4_t line4_2 = vld1q_f32(input_buf_c + input_w * 2 + 2 * w + 8);
            float32x4x2_t line4_01 = vuzpq_f32(line4_0, line4_1);
            float32x4x2_t line4_12 = vuzpq_f32(line4_1, line4_2);
            float32x4_t input4_2 = vextq_f32(line4_01.val[0], line4_2, 1);
            float32x4_t input4_3 = vextq_f32(line4_0, line4_12.val[1], 3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[20]), line4_01.val[0]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[21]), line4_01.val[1]);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[22]), input4_2);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[23]), input4_3);
            sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[24]), line4_12.val[0]);
            sum0 = vector_activation(sum0, activation);
            vst1q_f32(output_buf_c + w + 1, sum0);
        }
        /* ---- top row, scalar tail ---- */
        for (w = mid_w_block; w < mid_w; w++)
        {
            tmp = bias_c;
            tmp += weight_buf_c[10] * input_buf_c[2 * w];
            tmp += weight_buf_c[11] * input_buf_c[2 * w + 1];
            tmp += weight_buf_c[12] * input_buf_c[2 * w + 2];
            tmp += weight_buf_c[13] * input_buf_c[2 * w + 3];
            tmp += weight_buf_c[14] * input_buf_c[2 * w + 4];
            tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w];
            tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1];
            tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2];
            tmp += weight_buf_c[18] * input_buf_c[input_w + 2 * w + 3];
            tmp += weight_buf_c[19] * input_buf_c[input_w + 2 * w + 4];
            tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w];
            tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1];
            tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2];
            tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 2 * w + 3];
            tmp += weight_buf_c[24] * input_buf_c[input_w * 2 + 2 * w + 4];
            output_buf_c[w + 1] = elem_activation(tmp, activation);
        }
        /* ---- top-right corner: width parity decides 3 vs 4 valid kernel cols ---- */
        if (w_remain)
        {
            tmp = bias_c;
            tmp += weight_buf_c[10] * input_buf_c[2 * w];
            tmp += weight_buf_c[11] * input_buf_c[2 * w + 1];
            tmp += weight_buf_c[12] * input_buf_c[2 * w + 2];
            tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w];
            tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1];
            tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2];
            tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w];
            tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1];
            tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2];
            output_buf_c[w + 1] = elem_activation(tmp, activation);
        }
        else
        {
            tmp = bias_c;
            tmp += weight_buf_c[10] * input_buf_c[2 * w];
            tmp += weight_buf_c[11] * input_buf_c[2 * w + 1];
            tmp += weight_buf_c[12] * input_buf_c[2 * w + 2];
            tmp += weight_buf_c[13] * input_buf_c[2 * w + 3];
            tmp += weight_buf_c[15] * input_buf_c[input_w + 2 * w];
            tmp += weight_buf_c[16] * input_buf_c[input_w + 2 * w + 1];
            tmp += weight_buf_c[17] * input_buf_c[input_w + 2 * w + 2];
            tmp += weight_buf_c[18] * input_buf_c[input_w + 2 * w + 3];
            tmp += weight_buf_c[20] * input_buf_c[input_w * 2 + 2 * w];
            tmp += weight_buf_c[21] * input_buf_c[input_w * 2 + 2 * w + 1];
            tmp += weight_buf_c[22] * input_buf_c[input_w * 2 + 2 * w + 2];
            tmp += weight_buf_c[23] * input_buf_c[input_w * 2 + 2 * w + 3];
            output_buf_c[w + 1] = elem_activation(tmp, activation);
        }
        // mid height: all five kernel rows are inside the input
        for (h = 0; h < mid_h; h++)
        {
            /* left-border pixel: kernel cols 2..4 only */
            tmp = bias_c;
            tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h];
            tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 1];
            tmp += weight_buf_c[4] * input_buf_c[input_w * 2 * h + 2];
            tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1)];
            tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 1];
            tmp += weight_buf_c[9] * input_buf_c[input_w * (2 * h + 1) + 2];
            tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2)];
            tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 1];
            tmp += weight_buf_c[14] * input_buf_c[input_w * (2 * h + 2) + 2];
            tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3)];
            tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 1];
            tmp += weight_buf_c[19] * input_buf_c[input_w * (2 * h + 3) + 2];
            tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4)];
            tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 1];
            tmp += weight_buf_c[24] * input_buf_c[input_w * (2 * h + 4) + 2];
            output_buf_c[output_w * (h + 1)] = elem_activation(tmp, activation);
            /* vectorized interior: full 5x5 kernel over input rows 2h..2h+4 */
            for (w = 0; w < mid_w_block; w += 4)
            {
                float32x4_t sum0 = vdupq_n_f32(bias_c);
                float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w);
                float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w + 4);
                float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * 2 * h + 2 * w + 8);
                float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1);
                float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2);
                float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1);
                float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]);
                float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w);
                float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w + 4);
                float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 1) + 2 * w + 8);
                float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1);
                float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2);
                float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1);
                float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]);
                float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w);
                float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w + 4);
                float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 2) + 2 * w + 8);
                float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1);
                float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2);
                float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1);
                float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]);
                float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w);
                float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w + 4);
                float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 3) + 2 * w + 8);
                float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1);
                float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2);
                float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1);
                float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]);
                float32x4_t line4_0 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) + 2 * w);
                float32x4_t line4_1 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) + 2 * w + 4);
                float32x4_t line4_2 = vld1q_f32(input_buf_c + input_w * (2 * h + 4) + 2 * w + 8);
                float32x4x2_t line4_01 = vuzpq_f32(line4_0, line4_1);
                float32x4x2_t line4_12 = vuzpq_f32(line4_1, line4_2);
                float32x4_t input4_2 = vextq_f32(line4_01.val[0], line4_2, 1);
                float32x4_t input4_3 = vextq_f32(line4_0, line4_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[20]), line4_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[21]), line4_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[22]), input4_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[23]), input4_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[24]), line4_12.val[0]);
                sum0 = vector_activation(sum0, activation);
                vst1q_f32(output_buf_c + output_w * (h + 1) + w + 1, sum0);
            }
            /* scalar tail of the interior */
            for (w = mid_w_block; w < mid_w; w++)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 2 * w + 3];
                tmp += weight_buf_c[4] * input_buf_c[input_w * 2 * h + 2 * w + 4];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 3];
                tmp += weight_buf_c[9] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 4];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 3];
                tmp += weight_buf_c[14] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 4];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 2];
                tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 3];
                tmp += weight_buf_c[19] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 4];
                tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w];
                tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1];
                tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2];
                tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 3];
                tmp += weight_buf_c[24] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 4];
                output_buf_c[output_w * (h + 1) + w + 1] = elem_activation(tmp, activation);
            }
            /* right-border pixel: width parity decides 3 vs 4 valid kernel cols */
            if (w_remain)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h + 2 * w + 2];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 2];
                tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w];
                tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1];
                tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2];
                output_buf_c[output_w * (h + 2) - 1] = elem_activation(tmp, activation);
            }
            else
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * 2 * h + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * 2 * h + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * 2 * h + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * 2 * h + 2 * w + 3];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (2 * h + 1) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (2 * h + 1) + 2 * w + 3];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (2 * h + 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (2 * h + 2) + 2 * w + 3];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (2 * h + 3) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 2];
                tmp += weight_buf_c[18] * input_buf_c[input_w * (2 * h + 3) + 2 * w + 3];
                tmp += weight_buf_c[20] * input_buf_c[input_w * (2 * h + 4) + 2 * w];
                tmp += weight_buf_c[21] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 1];
                tmp += weight_buf_c[22] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 2];
                tmp += weight_buf_c[23] * input_buf_c[input_w * (2 * h + 4) + 2 * w + 3];
                output_buf_c[output_w * (h + 2) - 1] = elem_activation(tmp, activation);
            }
        }
        /* ---- bottom output row: height parity decides 3 vs 4 valid kernel rows ---- */
        if (h_remain)
        {
            /* odd input_h: kernel rows 0..2 cover input rows input_h-3..input_h-1 */
            tmp = bias_c;
            tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3)];
            tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 1];
            tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 3) + 2];
            tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2)];
            tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 1];
            tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 2) + 2];
            tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1)];
            tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 1];
            tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 1) + 2];
            output_buf_c[output_w * (output_h - 1)] = elem_activation(tmp, activation);
            for (w = 0; w < mid_w_block; w += 4)
            {
                float32x4_t sum0 = vdupq_n_f32(bias_c);
                float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w);
                float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 4);
                float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 8);
                float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1);
                float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2);
                float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1);
                float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]);
                float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w);
                float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 4);
                float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 8);
                float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1);
                float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2);
                float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1);
                float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]);
                float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w);
                float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 4);
                float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 8);
                float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1);
                float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2);
                float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1);
                float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]);
                sum0 = vector_activation(sum0, activation);
                vst1q_f32(output_buf_c + output_w * (output_h - 1) + w + 1, sum0);
            }
            for (w = mid_w_block; w < mid_w; w++)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3];
                tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 3) + 2 * w + 4];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3];
                tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 2) + 2 * w + 4];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3];
                tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 1) + 2 * w + 4];
                output_buf_c[output_w * (output_h - 1) + w + 1] = elem_activation(tmp, activation);
            }
            if (w_remain)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                output_buf_c[output_hw - 1] = elem_activation(tmp, activation);
            }
            else
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3];
                output_buf_c[output_hw - 1] = elem_activation(tmp, activation);
            }
        }
        else
        {
            /* even input_h: kernel rows 0..3 cover input rows input_h-4..input_h-1 */
            tmp = bias_c;
            tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4)];
            tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 1];
            tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 4) + 2];
            tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3)];
            tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 1];
            tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 3) + 2];
            tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2)];
            tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 1];
            tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 2) + 2];
            tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1)];
            tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 1];
            tmp += weight_buf_c[19] * input_buf_c[input_w * (input_h - 1) + 2];
            output_buf_c[output_w * (output_h - 1)] = elem_activation(tmp, activation);
            for (w = 0; w < mid_w_block; w += 4)
            {
                float32x4_t sum0 = vdupq_n_f32(bias_c);
                float32x4_t line0_0 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w);
                float32x4_t line0_1 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w + 4);
                float32x4_t line0_2 = vld1q_f32(input_buf_c + input_w * (input_h - 4) + 2 * w + 8);
                float32x4x2_t line0_01 = vuzpq_f32(line0_0, line0_1);
                float32x4x2_t line0_12 = vuzpq_f32(line0_1, line0_2);
                float32x4_t input0_2 = vextq_f32(line0_01.val[0], line0_2, 1);
                float32x4_t input0_3 = vextq_f32(line0_0, line0_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[0]), line0_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[1]), line0_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[2]), input0_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[3]), input0_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[4]), line0_12.val[0]);
                float32x4_t line1_0 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w);
                float32x4_t line1_1 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 4);
                float32x4_t line1_2 = vld1q_f32(input_buf_c + input_w * (input_h - 3) + 2 * w + 8);
                float32x4x2_t line1_01 = vuzpq_f32(line1_0, line1_1);
                float32x4x2_t line1_12 = vuzpq_f32(line1_1, line1_2);
                float32x4_t input1_2 = vextq_f32(line1_01.val[0], line1_2, 1);
                float32x4_t input1_3 = vextq_f32(line1_0, line1_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[5]), line1_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[6]), line1_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[7]), input1_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[8]), input1_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[9]), line1_12.val[0]);
                float32x4_t line2_0 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w);
                float32x4_t line2_1 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 4);
                float32x4_t line2_2 = vld1q_f32(input_buf_c + input_w * (input_h - 2) + 2 * w + 8);
                float32x4x2_t line2_01 = vuzpq_f32(line2_0, line2_1);
                float32x4x2_t line2_12 = vuzpq_f32(line2_1, line2_2);
                float32x4_t input2_2 = vextq_f32(line2_01.val[0], line2_2, 1);
                float32x4_t input2_3 = vextq_f32(line2_0, line2_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[10]), line2_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[11]), line2_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[12]), input2_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[13]), input2_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[14]), line2_12.val[0]);
                float32x4_t line3_0 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w);
                float32x4_t line3_1 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 4);
                float32x4_t line3_2 = vld1q_f32(input_buf_c + input_w * (input_h - 1) + 2 * w + 8);
                float32x4x2_t line3_01 = vuzpq_f32(line3_0, line3_1);
                float32x4x2_t line3_12 = vuzpq_f32(line3_1, line3_2);
                float32x4_t input3_2 = vextq_f32(line3_01.val[0], line3_2, 1);
                float32x4_t input3_3 = vextq_f32(line3_0, line3_12.val[1], 3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[15]), line3_01.val[0]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[16]), line3_01.val[1]);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[17]), input3_2);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[18]), input3_3);
                sum0 = vmlaq_f32(sum0, vdupq_n_f32(weight_buf_c[19]), line3_12.val[0]);
                sum0 = vector_activation(sum0, activation);
                vst1q_f32(output_buf_c + output_w * (output_h - 1) + w + 1, sum0);
            }
            for (w = mid_w_block; w < mid_w; w++)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 2 * w + 3];
                tmp += weight_buf_c[4] * input_buf_c[input_w * (input_h - 4) + 2 * w + 4];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3];
                tmp += weight_buf_c[9] * input_buf_c[input_w * (input_h - 3) + 2 * w + 4];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3];
                tmp += weight_buf_c[14] * input_buf_c[input_w * (input_h - 2) + 2 * w + 4];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3];
                tmp += weight_buf_c[19] * input_buf_c[input_w * (input_h - 1) + 2 * w + 4];
                output_buf_c[output_w * (output_h - 1) + w + 1] = elem_activation(tmp, activation);
            }
            if (w_remain)
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                output_buf_c[output_hw - 1] = elem_activation(tmp, activation);
            }
            else
            {
                tmp = bias_c;
                tmp += weight_buf_c[0] * input_buf_c[input_w * (input_h - 4) + 2 * w];
                tmp += weight_buf_c[1] * input_buf_c[input_w * (input_h - 4) + 2 * w + 1];
                tmp += weight_buf_c[2] * input_buf_c[input_w * (input_h - 4) + 2 * w + 2];
                tmp += weight_buf_c[3] * input_buf_c[input_w * (input_h - 4) + 2 * w + 3];
                tmp += weight_buf_c[5] * input_buf_c[input_w * (input_h - 3) + 2 * w];
                tmp += weight_buf_c[6] * input_buf_c[input_w * (input_h - 3) + 2 * w + 1];
                tmp += weight_buf_c[7] * input_buf_c[input_w * (input_h - 3) + 2 * w + 2];
                tmp += weight_buf_c[8] * input_buf_c[input_w * (input_h - 3) + 2 * w + 3];
                tmp += weight_buf_c[10] * input_buf_c[input_w * (input_h - 2) + 2 * w];
                tmp += weight_buf_c[11] * input_buf_c[input_w * (input_h - 2) + 2 * w + 1];
                tmp += weight_buf_c[12] * input_buf_c[input_w * (input_h - 2) + 2 * w + 2];
                tmp += weight_buf_c[13] * input_buf_c[input_w * (input_h - 2) + 2 * w + 3];
                tmp += weight_buf_c[15] * input_buf_c[input_w * (input_h - 1) + 2 * w];
                tmp += weight_buf_c[16] * input_buf_c[input_w * (input_h - 1) + 2 * w + 1];
                tmp += weight_buf_c[17] * input_buf_c[input_w * (input_h - 1) + 2 * w + 2];
                tmp += weight_buf_c[18] * input_buf_c[input_w * (input_h - 1) + 2 * w + 3];
                output_buf_c[output_hw - 1] = elem_activation(tmp, activation);
            }
        }
    }
}

/* Depthwise 7x7 stride-1 convolution (NEON). NOTE: this function continues
 * beyond the end of this excerpt; only its head is visible here. */
void depthwise_conv_k7s1(float* input, float* weight, float* bias, float* output, int input_h, int input_w, int channel,
                         int output_h, int output_w, int activation, int num_thread)
{
    int channel_size = input_h * input_w;
    int mid_w = input_w - 6;
    int mid_block = mid_w >> 2;
    int mid_h = input_h - 6;
    int w = 0;
    // #pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel; c++)
    {
        float tmp0, tmp1, tmp2;
        /* seven consecutive input row pointers for the 7-tap kernel rows */
        float* input_1 = input + c * channel_size;
        float* input_2 = input_1 + input_w;
        float* input_3 = input_1 + input_w * 2;
        float* input_4 = input_1 + input_w * 3;
        float* input_5 = input_1 + input_w * 4;
        float* input_6 = input_1 + input_w * 5;
        float* input_7 = input_1 + input_w * 6;
        float* output_buf = output + c * channel_size;
        float* output_buf_1 = output_buf + output_w;
        float* output_buf_2 = output_buf_1 + output_w;
        float* weight_buf = weight + c * 49;
        float bias_c = bias ?
bias[c] : 0; float32x4_t kernel_0_3 = vld1q_f32(weight_buf); float32x4_t kernel_4_7 = vld1q_f32(weight_buf + 4); float32x4_t kernel_8_11 = vld1q_f32(weight_buf + 8); float32x4_t kernel_12_15 = vld1q_f32(weight_buf + 12); float32x4_t kernel_16_19 = vld1q_f32(weight_buf + 16); float32x4_t kernel_20_23 = vld1q_f32(weight_buf + 20); float32x4_t kernel_24_27 = vld1q_f32(weight_buf + 24); float32x4_t kernel_28_31 = vld1q_f32(weight_buf + 28); float32x4_t kernel_32_35 = vld1q_f32(weight_buf + 32); float32x4_t kernel_36_39 = vld1q_f32(weight_buf + 36); float32x4_t kernel_40_43 = vld1q_f32(weight_buf + 40); float32x4_t kernel_44_47 = vld1q_f32(weight_buf + 44); float32x4_t kernel_48_51 = vld1q_f32(weight_buf + 48); float32x4_t line1 = vld1q_f32(input_1); float32x4_t line2 = vld1q_f32(input_2); float32x4_t line3 = vld1q_f32(input_3); float32x4_t line4 = vld1q_f32(input_4); float32x4_t line5 = vld1q_f32(input_5); float32x4_t line6 = vld1q_f32(input_6); float32x4_t kernel_10_13 = vextq_f32(kernel_8_11, kernel_12_15, 2); float32x4_t kernel_17_20 = vextq_f32(kernel_16_19, kernel_20_23, 1); float32x4_t kernel_31_34 = vextq_f32(kernel_28_31, kernel_32_35, 3); float32x4_t kernel_38_41 = vextq_f32(kernel_36_39, kernel_40_43, 2); float32x4_t kernel_45_48 = vextq_f32(kernel_44_47, kernel_48_51, 1); float32x4_t line1_1 = vld1q_f32(input_1 + 4); float32x4_t line2_1 = vld1q_f32(input_2 + 4); float32x4_t line3_1 = vld1q_f32(input_3 + 4); float32x4_t line4_1 = vld1q_f32(input_4 + 4); float32x4_t line5_1 = vld1q_f32(input_5 + 4); float32x4_t line6_1 = vld1q_f32(input_6 + 4); /* top start1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t 
tmp_4_1 = vmulq_f32(line1, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_38_41); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_45_48); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); /* top start2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_30_33); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_37_40); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_44_47); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += weight_buf[27] * input_1[4]; tmp0 += weight_buf[34] * input_2[4]; tmp0 += weight_buf[41] * input_3[4]; tmp0 += weight_buf[48] * input_4[4]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_16_19); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_23_26); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_30_33); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_37_40); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, 
kernel_44_47); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += weight_buf[20] * input_1[4]; tmp1 += weight_buf[27] * input_2[4]; tmp1 += weight_buf[34] * input_3[4]; tmp1 += weight_buf[41] * input_4[4]; tmp1 += weight_buf[48] * input_5[4]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_9_12); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_16_19); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_23_26); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_30_33); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_37_40); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_44_47); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += weight_buf[13] * input_1[4]; tmp2 += weight_buf[20] * input_2[4]; tmp2 += weight_buf[27] * input_3[4]; tmp2 += weight_buf[34] * input_4[4]; tmp2 += weight_buf[41] * input_5[4]; tmp2 += weight_buf[48] * input_6[4]; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t kernel_15_18 = vextq_f32(kernel_12_15, kernel_16_19, 3); float32x4_t kernel_22_25 = vextq_f32(kernel_20_23, kernel_24_27, 2); float32x4_t kernel_29_32 = vextq_f32(kernel_28_31, kernel_32_35, 1); float32x4_t kernel_43_46 = vextq_f32(kernel_40_43, kernel_44_47, 3); /* top start3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), 
vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_29_32); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_36_39); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_43_46); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_8_11); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_15_18); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_22_25); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_29_32); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_36_39); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_43_46); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line1_1), vget_high_f32(kernel_10_13)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line2_1), vget_high_f32(kernel_17_20)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_24_27)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_31_34)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_38_41)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_45_48)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = 
elem_activation(tmp2, activation); } float32x4_t line1_2; float32x4_t line2_2; float32x4_t line3_2; float32x4_t line4_2; float32x4_t line5_2; float32x4_t line6_2; /* top mid */ for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); float32x4_t tmp_4_2 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line1, vget_high_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line1, vget_high_f32(kernel_4_7), 1); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line1_1, vget_high_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line1_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_2 
= vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 1); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2, vget_low_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line2, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line2, line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line2, line2_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2_1, vget_low_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line2_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, 
vget_low_f32(kernel_20_23), 0); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3, vget_low_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3_1, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3_1, vget_low_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 1); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_high_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4, vget_high_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4, vget_low_f32(kernel_28_31), 0); tmp = 
vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_high_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4_1, vget_high_f32(kernel_36_39), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4_1, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_32_35), 0); /* line5 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5, vget_high_f32(kernel_40_43), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5, vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line5, line5_1, 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line5, line5_1, 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_36_39), 1); tmp = 
vextq_f32(line5, line5_1, 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5_1, vget_high_f32(kernel_44_47), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5_1, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_40_43), 1); /* line6 */ tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6, vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line6, line6_1, 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line6, line6_1, 2); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line6, line6_1, 3); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6_1, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); tmp_4_1 = vector_activation(tmp_4_1, activation); tmp_4_2 = vector_activation(tmp_4_2, activation); vst1q_f32(output_buf, tmp_4_0); vst1q_f32(output_buf_1, tmp_4_1); vst1q_f32(output_buf_2, tmp_4_2); output_buf += 4; output_buf_1 += 4; output_buf_2 += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; } float32x4_t zero = vdupq_n_f32(0.0); float32x4_t kernel_7_10 = vextq_f32(kernel_4_7, 
kernel_8_11, 3); float32x4_t kernel_14_17 = vextq_f32(kernel_12_15, kernel_16_19, 2); float32x4_t kernel_21_24 = vextq_f32(kernel_20_23, kernel_24_27, 1); float32x4_t kernel_35_38 = vextq_f32(kernel_32_35, kernel_36_39, 3); float32x4_t kernel_42_45 = vextq_f32(kernel_40_43, kernel_44_47, 2); line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_10_13); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_17_20); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_24_27); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, 
kernel_45_48); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_38_41); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_31_34); tmp = vextq_f32(zero, line5_1, 3); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_45_48); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_38_41); tmp = vextq_f32(zero, line6_1, 3); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); *output_buf_1++ = elem_activation(tmp1, activation); *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); } /* top end1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_44_47)); tmp0 = 
vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_30_33)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_37_40)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_44_47)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line1_1), vget_high_f32(kernel_9_12)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line2_1), vget_high_f32(kernel_16_19)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_23_26)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_30_33)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_37_40)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_44_47)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = 
vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); } /* top end2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[39]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[46]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line1_1, 0) * weight_buf[18]; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[25]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[32]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[39]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[46]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, 
kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_42_45); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line1_1, 0) * weight_buf[11]; tmp2 += vgetq_lane_f32(line2_1, 0) * weight_buf[18]; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[25]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[32]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[39]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[46]; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); } /* top end3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line1, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line2, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line3, kernel_21_24); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_28_31); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_35_38); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, 
kernel_42_45); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } float32x4_t kernel_1_4 = vextq_f32(kernel_0_3, kernel_4_7, 1); float32x4_t kernel_2_5 = vextq_f32(kernel_0_3, kernel_4_7, 2); float32x4_t kernel_3_6 = vextq_f32(kernel_0_3, kernel_4_7, 3); output_buf += output_w * 2; float32x4_t line7; float32x4_t line7_1; float32x4_t line7_2; /* mid */ for (int h = 0; h < mid_h; h++) { input_1 = input + c * channel_size + h * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; input_7 = input_6 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); line7 = vld1q_f32(input_7); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); line7_1 = vld1q_f32(input_7 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_2_5); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_9_12); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_16_19); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_30_33); tmp_4_0 = 
vmlaq_f32(tmp_4_0, line6, kernel_37_40); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_44_47); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[6]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[13]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[20]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[27]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[34]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[41]; tmp0 += vgetq_lane_f32(line7_1, 0) * weight_buf[48]; *output_buf++ = elem_activation(tmp0, activation); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = 
vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); line7_2 = vld1q_f32(input_7 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line2, line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line2, line2_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, 
line3_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5, vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line5, line5_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line5, line5_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line5, line5_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5_1, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6, vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line6, line6_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line6, line6_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line6, line6_1, 
3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6_1, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); /* line7 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line7, vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line7, line7_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line7, line7_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line7, line7_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line7_1, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line7_1, line7_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line7_1, line7_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line7 = line7_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; line7_1 = line7_2; } line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); line7_2 = vld1q_f32(input_7 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, 
line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_3_6); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_10_13); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_17_20); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp = vextq_f32(zero, line5_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_31_34); tmp = vextq_f32(zero, line6_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp = vextq_f32(zero, line7_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); line7_1 = vextq_f32(line7_1, line7_2, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_2_5)); tmp_2_0 = 
vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); line7_1 = vextq_f32(line7_1, line7_1, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[4]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[11]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[18]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[39]; tmp0 += vgetq_lane_f32(line7_1, 
0) * weight_buf[46]; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); } { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } /* bottom start1 */ input_1 = input + c * channel_size + input_w * (input_h - 6); input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); output_buf_1 = output_buf + input_w; output_buf_2 = output_buf_1 + input_w; { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, 
kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_31_34); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_3_6); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_24_27); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); /* bottom start2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_2_5); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_9_12); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_16_19); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_23_26); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_30_33); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_37_40); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += vgetq_lane_f32(line1_1, 0) * weight_buf[6]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[13]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[20]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[27]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[34]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[41]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_2_5); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_9_12); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_16_19); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_23_26); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_30_33); tmp1 = 
vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[6]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[13]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[20]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[27]; tmp1 += vgetq_lane_f32(line6_1, 0) * weight_buf[34]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_2_5); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_9_12); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_16_19); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_23_26); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[6]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[13]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[20]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[27]; *output_buf_2++ = elem_activation(tmp2, activation); } /* bottom start3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + 
bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_1_4); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_29_32); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_31_34)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_1_4); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_8_11); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_15_18); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_22_25); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_3_6)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_10_13)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_17_20)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_24_27)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } /* bottom mid */ for (w = 0; w < mid_block; w++) { line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); 
float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); float32x4_t tmp_4_2 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1, vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line1, line1_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line1, line1_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line1, line1_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line1_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line1_1, line1_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line1_1, line1_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2, vget_high_f32(kernel_4_7), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2, vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line2, line2_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line2, line2_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line2, line2_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line2_1, vget_high_f32(kernel_8_11), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line2_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line2_1, line2_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line2_1, line2_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_4_7), 0); /* line3 */ tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, line3, vget_high_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3, vget_high_f32(kernel_4_7), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3, vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line3, line3_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line3, line3_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line3, line3_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_0_3), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line3_1, vget_high_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line3_1, vget_high_f32(kernel_8_11), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line3_1, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line3_1, line3_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line3_1, line3_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_4_7), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4, vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4, vget_high_f32(kernel_12_15), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4, vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line4, line4_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line4, line4_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line4, line4_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_8_11), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line4_1, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line4_1, vget_high_f32(kernel_16_19), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line4_1, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line4_1, line4_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line4_1, line4_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_12_15), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5, vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5, vget_low_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5, vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line5, line5_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line5, line5_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); 
tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line5, line5_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line5_1, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line5_1, vget_low_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line5_1, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line5_1, line5_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line5_1, line5_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_20_23), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6, vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line6, vget_low_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6, vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line6, line6_1, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line6, line6_1, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line6, line6_1, 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, 
tmp, vget_high_f32(kernel_28_31), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line6_1, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line6_1, vget_low_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, line6_1, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line6_1, line6_2, 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line6_1, line6_2, 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_2 = vmlaq_lane_f32(tmp_4_2, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf_1, tmp_4_1); output_buf_1 += 4; tmp_4_2 = vector_activation(tmp_4_2, activation); vst1q_f32(output_buf_2, tmp_4_2); output_buf_2 += 4; line1 = line1_1; line2 = line2_1; line3 = line3_1; line4 = line4_1; line5 = line5_1; line6 = line6_1; line1_1 = line1_2; line2_1 = line2_2; line3_1 = line3_2; line4_1 = line4_2; line5_1 = line5_2; line6_1 = line6_2; } line1_2 = vld1q_f32(input_1 + 8 + 4 * w); line2_2 = vld1q_f32(input_2 + 8 + 4 * w); line3_2 = vld1q_f32(input_3 + 8 + 4 * w); line4_2 = vld1q_f32(input_4 + 8 + 4 * w); line5_2 = vld1q_f32(input_5 + 8 + 4 * w); line6_2 = vld1q_f32(input_6 + 8 + 4 * w); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x4_t tmp_4_1 = 
vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); float32x4_t tmp = vextq_f32(zero, line1_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_3_6); tmp = vextq_f32(zero, line2_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_3_6); tmp = vextq_f32(zero, line3_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_10_13); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_3_6); tmp = vextq_f32(zero, line4_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_17_20); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_10_13); tmp = vextq_f32(zero, line5_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_24_27); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_17_20); tmp = vextq_f32(zero, line6_1, 3); tmp_4_0 = vmlaq_f32(tmp_4_0, tmp, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, tmp, kernel_31_34); tmp_4_2 = vmlaq_f32(tmp_4_2, tmp, kernel_24_27); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = 
vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_2, 1); line2_1 = vextq_f32(line2_1, line2_2, 1); line3_1 = vextq_f32(line3_1, line3_2, 1); line4_1 = vextq_f32(line4_1, line4_2, 1); line5_1 = vextq_f32(line5_1, line5_2, 1); line6_1 = vextq_f32(line6_1, line6_2, 1); } /* bottom end1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_2_5)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_2_5)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), 
vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_30_33)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); float32x2_t tmp_2_2 = vadd_f32(vget_low_f32(tmp_4_2), vget_high_f32(tmp_4_2)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line3_1), vget_high_f32(kernel_2_5)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line4_1), vget_high_f32(kernel_9_12)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line5_1), vget_high_f32(kernel_16_19)); tmp_2_2 = vmla_f32(tmp_2_2, vget_low_f32(line6_1), vget_high_f32(kernel_23_26)); tmp2 = vget_lane_f32(tmp_2_2, 0) + vget_lane_f32(tmp_2_2, 1) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); } /* bottom end2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; tmp0 += 
vgetq_lane_f32(line1_1, 0) * weight_buf[4]; tmp0 += vgetq_lane_f32(line2_1, 0) * weight_buf[11]; tmp0 += vgetq_lane_f32(line3_1, 0) * weight_buf[18]; tmp0 += vgetq_lane_f32(line4_1, 0) * weight_buf[25]; tmp0 += vgetq_lane_f32(line5_1, 0) * weight_buf[32]; tmp0 += vgetq_lane_f32(line6_1, 0) * weight_buf[39]; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; tmp1 += vgetq_lane_f32(line2_1, 0) * weight_buf[4]; tmp1 += vgetq_lane_f32(line3_1, 0) * weight_buf[11]; tmp1 += vgetq_lane_f32(line4_1, 0) * weight_buf[18]; tmp1 += vgetq_lane_f32(line5_1, 0) * weight_buf[25]; tmp1 += vgetq_lane_f32(line6_1, 0) * weight_buf[32]; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; tmp2 += vgetq_lane_f32(line3_1, 0) * weight_buf[4]; tmp2 += vgetq_lane_f32(line4_1, 0) * weight_buf[11]; tmp2 += vgetq_lane_f32(line5_1, 0) * weight_buf[18]; tmp2 += vgetq_lane_f32(line6_1, 0) * weight_buf[25]; *output_buf_2++ = elem_activation(tmp2, activation); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); } /* bottom end3 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, 
kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line2, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_28_31); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); float32x4_t tmp_4_2 = vmulq_f32(line3, kernel_0_3); tmp_4_2 = vmlaq_f32(tmp_4_2, line4, kernel_7_10); tmp_4_2 = vmlaq_f32(tmp_4_2, line5, kernel_14_17); tmp_4_2 = vmlaq_f32(tmp_4_2, line6, kernel_21_24); tmp2 = vgetq_lane_f32(tmp_4_2, 0) + vgetq_lane_f32(tmp_4_2, 1) + vgetq_lane_f32(tmp_4_2, 2) + vgetq_lane_f32(tmp_4_2, 3) + bias_c; *output_buf_2++ = elem_activation(tmp2, activation); } } } void depthwise_conv_k7s2(float* input, float* weight, float* bias, float* output, int input_h, int input_w, int channel, int output_h, int output_w, int activation, int num_thread) { int input_hw = input_h * input_w; int output_hw = output_h * output_w; int mid_w = output_w - 3; int mid_h = output_h - 3; int remain_h = input_h & 0x1; int remain_w = input_w & 0x1; if (remain_h) mid_h--; if (remain_w) mid_w--; int mid_block = mid_w >> 2; int w = 0; //#pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float tmp0, tmp1; float* output_buf = output + c * output_hw; float* output_buf_1 = output_buf + output_w; float* weight_buf = weight + c * 49; float bias_c = bias ? 
bias[c] : 0; float* input_1 = input + c * input_hw; float* input_2 = input_1 + input_w; float* input_3 = input_2 + input_w; float* input_4 = input_3 + input_w; float* input_5 = input_4 + input_w; float* input_6 = input_5 + input_w; float32x4_t kernel_0_3 = vld1q_f32(weight_buf); float32x4_t kernel_4_7 = vld1q_f32(weight_buf + 4); float32x4_t kernel_8_11 = vld1q_f32(weight_buf + 8); float32x4_t kernel_12_15 = vld1q_f32(weight_buf + 12); float32x4_t kernel_16_19 = vld1q_f32(weight_buf + 16); float32x4_t kernel_20_23 = vld1q_f32(weight_buf + 20); float32x4_t kernel_24_27 = vld1q_f32(weight_buf + 24); float32x4_t kernel_28_31 = vld1q_f32(weight_buf + 28); float32x4_t kernel_32_35 = vld1q_f32(weight_buf + 32); float32x4_t kernel_36_39 = vld1q_f32(weight_buf + 36); float32x4_t kernel_40_43 = vld1q_f32(weight_buf + 40); float32x4_t kernel_44_47 = vld1q_f32(weight_buf + 44); float32x4_t kernel_48_51 = vld1q_f32(weight_buf + 48); float32x4_t line1 = vld1q_f32(input_1); float32x4_t line2 = vld1q_f32(input_2); float32x4_t line3 = vld1q_f32(input_3); float32x4_t line4 = vld1q_f32(input_4); float32x4_t line5 = vld1q_f32(input_5); float32x4_t line6 = vld1q_f32(input_6); float32x4_t kernel_10_13 = vextq_f32(kernel_8_11, kernel_12_15, 2); float32x4_t kernel_17_20 = vextq_f32(kernel_16_19, kernel_20_23, 1); float32x4_t kernel_31_34 = vextq_f32(kernel_28_31, kernel_32_35, 3); float32x4_t kernel_38_41 = vextq_f32(kernel_36_39, kernel_40_43, 2); float32x4_t kernel_45_48 = vextq_f32(kernel_44_47, kernel_48_51, 1); /* top left1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_10_13); tmp_4_1 = 
vmlaq_f32(tmp_4_1, line2, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } float32x4_t line1_1 = vld1q_f32(input_1 + 4); float32x4_t line2_1 = vld1q_f32(input_2 + 4); float32x4_t line3_1 = vld1q_f32(input_3 + 4); float32x4_t line4_1 = vld1q_f32(input_4 + 4); float32x4_t line5_1 = vld1q_f32(input_5 + 4); float32x4_t line6_1 = vld1q_f32(input_6 + 4); float32x4_t kernel_15_18 = vextq_f32(kernel_12_15, kernel_16_19, 3); float32x4_t kernel_22_25 = vextq_f32(kernel_20_23, kernel_24_27, 2); float32x4_t kernel_29_32 = vextq_f32(kernel_28_31, kernel_32_35, 1); float32x4_t kernel_43_46 = vextq_f32(kernel_40_43, kernel_44_47, 3); /* top left2 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_22_25); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_29_32); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_36_39); tmp_4_1 = 
vmlaq_f32(tmp_4_1, line6, kernel_43_46); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } /* top mid */ float32x4x2_t line_1_01 = vuzpq_f32(line1, line1_1); float32x4x2_t line_2_01 = vuzpq_f32(line2, line2_1); float32x4x2_t line_3_01 = vuzpq_f32(line3, line3_1); float32x4x2_t line_4_01 = vuzpq_f32(line4, line4_1); float32x4x2_t line_5_01 = vuzpq_f32(line5, line5_1); float32x4x2_t line_6_01 = vuzpq_f32(line6, line6_1); for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_1_01.val[1], vget_high_f32(kernel_4_7), 1); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_2_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_3_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_high_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_4_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp_4_1 = 
vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_32_35), 0); /* line5 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_5_01.val[1], vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_40_43), 1); /* line6 */ tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_6_01.val[1], 
vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_44_47), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf, tmp_4_0); vst1q_f32(output_buf_1, tmp_4_1); output_buf += 4; output_buf_1 += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; line_6_01 = line_6_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; float32x4_t kernel_7_10 = vextq_f32(kernel_4_7, kernel_8_11, 3); float32x4_t kernel_14_17 = vextq_f32(kernel_12_15, kernel_16_19, 2); float32x4_t kernel_21_24 = 
vextq_f32(kernel_20_23, kernel_24_27, 1); float32x4_t kernel_35_38 = vextq_f32(kernel_32_35, kernel_36_39, 3); float32x4_t kernel_42_45 = vextq_f32(kernel_40_43, kernel_44_47, 2); float32x4_t zero = vdupq_n_f32(0.0); float32x4_t kernel_0789 = vextq_f32(zero, kernel_7_10, 3); float32x4_t kernel_0141516 = vextq_f32(zero, kernel_14_17, 3); float32x4_t kernel_0212223 = vextq_f32(zero, kernel_21_24, 3); float32x4_t kernel_0282930 = vextq_f32(zero, kernel_28_31, 3); float32x4_t kernel_0353637 = vextq_f32(zero, kernel_35_38, 3); float32x4_t kernel_0424344 = vextq_f32(zero, kernel_42_45, 3); for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0424344); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0282930); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0353637); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0424344); tmp_4_1 = vmlaq_f32(tmp_4_1, line1_1, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line2_1, kernel_17_20); tmp_4_1 = 
vmlaq_f32(tmp_4_1, line3_1, kernel_24_27); tmp_4_1 = vmlaq_f32(tmp_4_1, line4_1, kernel_31_34); tmp_4_1 = vmlaq_f32(tmp_4_1, line5_1, kernel_38_41); tmp_4_1 = vmlaq_f32(tmp_4_1, line6_1, kernel_45_48); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, line6_2, 2); } /* top right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), 
vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_42_45); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_23_26)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_30_33)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_37_40)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_44_47)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, 
activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_21_24); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_28_31); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_35_38); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_42_45); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0424344); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line1, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line2, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line3, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0282930); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0353637); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0424344); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line1_1), vget_low_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line2_1), vget_low_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_24_27)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_low_f32(kernel_31_34)); tmp_2_1 = vmla_f32(tmp_2_1, 
vget_low_f32(line5_1), vget_low_f32(kernel_38_41)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_low_f32(kernel_45_48)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } float* input_7; output_buf = output_buf_1; float32x4_t kernel_3_6 = vextq_f32(kernel_0_3, kernel_4_7, 3); float32x4_t kernel_1_4 = vextq_f32(kernel_0_3, kernel_4_7, 1); float32x4_t kernel_0012 = vextq_f32(zero, kernel_0_3, 3); /* mid */ for (int h = 0; h < mid_h; h++) { input_1 = input + c * input_hw + input_w * (1 + 2 * h); input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; input_7 = input_6 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); float32x4_t line7 = vld1q_f32(input_7); /* mid left 1 */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_45_48); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); /* mid left 2 */ float32x4_t line7_1 = vld1q_f32(input_7 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, 
kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_43_46); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line_1_01 = vuzpq_f32(line1, line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); line_6_01 = vuzpq_f32(line6, line6_1); float32x4x2_t line_7_01 = vuzpq_f32(line7, line7_1); /* mid mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4x2_t line_7_23 = vld2q_f32(input_7 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], 
line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_6_01.val[1], vget_high_f32(kernel_32_35), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); /* line7 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_7_01.val[1], vget_high_f32(kernel_40_43), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_40_43), 1); tmp = vextq_f32(line_7_01.val[1], line_7_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_44_47), 1); tmp = vextq_f32(line_7_01.val[1], line_7_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 0); tmp = vextq_f32(line_7_01.val[0], line_7_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_44_47), 1); tmp = vextq_f32(line_7_01.val[1], line_7_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_48_51), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; 
line_6_01 = line_6_23; line_7_01 = line_7_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line_7_01 = vzipq_f32(line_7_01.val[0], line_7_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; line7 = line_7_01.val[0]; line7_1 = line_7_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t line7_2 = vld1q_f32(input_7 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_0424344); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6_1, kernel_38_41); tmp_4_0 = vmlaq_f32(tmp_4_0, line7_1, kernel_45_48); 
tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line7 = vextq_f32(line7, line7_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, line6_2, 2); line7_1 = vextq_f32(line7_1, line7_2, 2); } /* mid right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line7 = vextq_f32(line7, line7_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); line7_1 = vextq_f32(line7_1, line7_1, 1); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); float32x2_t tmp_2_0 = 
vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_high_f32(kernel_44_47)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line7 = vextq_f32(line7, line7_1, 2); tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_42_45); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line7, kernel_0424344); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 
= vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_low_f32(kernel_38_41)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line7_1), vget_low_f32(kernel_45_48)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } /* bottom */ if (remain_h) { output_buf_1 = output_buf + output_w; input_1 = input + c * input_hw + (input_h - 6) * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; input_6 = input_5 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); line6 = vld1q_f32(input_6); /* bottom 1 left */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_24_27); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = 
elem_activation(tmp1, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); line6_1 = vld1q_f32(input_6 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_36_39); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_1_4); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_8_11); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_15_18); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_22_25); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_high_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_24_27)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line_1_01 = vuzpq_f32(line1, 
line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); line_6_01 = vuzpq_f32(line6, line6_1); /* bottom 1 mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4x2_t line_6_23 = vld2q_f32(input_6 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); float32x4_t tmp_4_1 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, 
vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_3_01.val[1], vget_low_f32(kernel_0_3), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_4_7), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_4_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_4_01.val[0], 
line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_12_15), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_5_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_20_23), 0); /* line6 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_6_01.val[1], vget_high_f32(kernel_32_35), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, line_6_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_36_39), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_6_01.val[0], line_6_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 0); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_6_01.val[1], line_6_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_40_43), 1); tmp_4_1 = vmlaq_lane_f32(tmp_4_1, tmp, vget_high_f32(kernel_24_27), 1); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf 
+= 4; tmp_4_1 = vector_activation(tmp_4_1, activation); vst1q_f32(output_buf_1, tmp_4_1); output_buf_1 += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; line_6_01 = line_6_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line_6_01 = vzipq_f32(line_6_01.val[0], line_6_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; line6 = line_6_01.val[0]; line6_1 = line_6_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t line6_2 = vld1q_f32(input_6 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp_4_0 = vmlaq_f32(tmp_4_0, line6_1, kernel_38_41); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + 
vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0012); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0141516); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_0212223); tmp_4_1 = vmlaq_f32(tmp_4_1, line3_1, kernel_3_6); tmp_4_1 = vmlaq_f32(tmp_4_1, line4_1, kernel_10_13); tmp_4_1 = vmlaq_f32(tmp_4_1, line5_1, kernel_17_20); tmp_4_1 = vmlaq_f32(tmp_4_1, line6_1, kernel_24_27); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); line6_1 = vextq_f32(line6_1, line6_2, 2); } /* bottom 1 right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); float32x4_t kernel_37_40 = vextq_f32(kernel_36_39, kernel_40_43, 1); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line6 = vextq_f32(line6, line6_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); line6_1 = vextq_f32(line6_1, line6_1, 1); { 
float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_high_f32(kernel_37_40)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_21_24); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_4_7)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_high_f32(kernel_9_12)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_high_f32(kernel_16_19)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_high_f32(kernel_23_26)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line6 = vextq_f32(line6, line6_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); 
tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_35_38); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0_3); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_7_10); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_14_17); tmp_4_1 = vmlaq_f32(tmp_4_1, line6, kernel_21_24); tmp1 = vgetq_lane_f32(tmp_4_1, 0) + vgetq_lane_f32(tmp_4_1, 1) + vgetq_lane_f32(tmp_4_1, 2) + vgetq_lane_f32(tmp_4_1, 3) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line6, kernel_0353637); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line6_1), vget_low_f32(kernel_38_41)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); float32x4_t tmp_4_1 = vmulq_f32(line3, kernel_0012); tmp_4_1 = vmlaq_f32(tmp_4_1, line4, kernel_0789); tmp_4_1 = vmlaq_f32(tmp_4_1, line5, kernel_0141516); tmp_4_1 = 
vmlaq_f32(tmp_4_1, line6, kernel_0212223); float32x2_t tmp_2_1 = vadd_f32(vget_low_f32(tmp_4_1), vget_high_f32(tmp_4_1)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line3_1), vget_low_f32(kernel_3_6)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line4_1), vget_low_f32(kernel_10_13)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line5_1), vget_low_f32(kernel_17_20)); tmp_2_1 = vmla_f32(tmp_2_1, vget_low_f32(line6_1), vget_low_f32(kernel_24_27)); tmp1 = vget_lane_f32(tmp_2_1, 0) + vget_lane_f32(tmp_2_1, 1) + bias_c; *output_buf_1++ = elem_activation(tmp1, activation); } } else { input_1 = input + c * input_hw + (input_h - 5) * input_w; input_2 = input_1 + input_w; input_3 = input_2 + input_w; input_4 = input_3 + input_w; input_5 = input_4 + input_w; line1 = vld1q_f32(input_1); line2 = vld1q_f32(input_2); line3 = vld1q_f32(input_3); line4 = vld1q_f32(input_4); line5 = vld1q_f32(input_5); /* bottom 0 left */ { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_31_34); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1_1 = vld1q_f32(input_1 + 4); line2_1 = vld1q_f32(input_2 + 4); line3_1 = vld1q_f32(input_3 + 4); line4_1 = vld1q_f32(input_4 + 4); line5_1 = vld1q_f32(input_5 + 4); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_1_4); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_8_11); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_15_18); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_22_25); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_29_32); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_high_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), 
vget_high_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_31_34)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line_1_01 = vuzpq_f32(line1, line1_1); line_2_01 = vuzpq_f32(line2, line2_1); line_3_01 = vuzpq_f32(line3, line3_1); line_4_01 = vuzpq_f32(line4, line4_1); line_5_01 = vuzpq_f32(line5, line5_1); /* bottom 0 mid */ for (w = 0; w < mid_block; w++) { float32x4x2_t line_1_23 = vld2q_f32(input_1 + 8 + 8 * w); float32x4x2_t line_2_23 = vld2q_f32(input_2 + 8 + 8 * w); float32x4x2_t line_3_23 = vld2q_f32(input_3 + 8 + 8 * w); float32x4x2_t line_4_23 = vld2q_f32(input_4 + 8 + 8 * w); float32x4x2_t line_5_23 = vld2q_f32(input_5 + 8 + 8 * w); float32x4_t tmp_4_0 = vdupq_n_f32(bias_c); /* line1 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_1_01.val[1], vget_low_f32(kernel_0_3), 0); float32x4_t tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_0_3), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 0); tmp = vextq_f32(line_1_01.val[0], line_1_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_4_7), 1); tmp = vextq_f32(line_1_01.val[1], line_1_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_4_7), 0); /* line2 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_2_01.val[1], vget_high_f32(kernel_4_7), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 1); tmp_4_0 = 
vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_8_11), 1); tmp = vextq_f32(line_2_01.val[0], line_2_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 0); tmp = vextq_f32(line_2_01.val[1], line_2_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_12_15), 1); /* line3 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_3_01.val[1], vget_high_f32(kernel_12_15), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_12_15), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 0); tmp = vextq_f32(line_3_01.val[0], line_3_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_16_19), 1); tmp = vextq_f32(line_3_01.val[1], line_3_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_20_23), 0); /* line4 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_4_01.val[1], vget_low_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_20_23), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, 
tmp, vget_low_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_24_27), 1); tmp = vextq_f32(line_4_01.val[0], line_4_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 0); tmp = vextq_f32(line_4_01.val[1], line_4_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_24_27), 1); /* line5 */ tmp_4_0 = vmlaq_lane_f32(tmp_4_0, line_5_01.val[1], vget_low_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 1); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_28_31), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 2); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 0); tmp = vextq_f32(line_5_01.val[0], line_5_23.val[0], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_low_f32(kernel_32_35), 1); tmp = vextq_f32(line_5_01.val[1], line_5_23.val[1], 3); tmp_4_0 = vmlaq_lane_f32(tmp_4_0, tmp, vget_high_f32(kernel_32_35), 0); tmp_4_0 = vector_activation(tmp_4_0, activation); vst1q_f32(output_buf, tmp_4_0); output_buf += 4; line_1_01 = line_1_23; line_2_01 = line_2_23; line_3_01 = line_3_23; line_4_01 = line_4_23; line_5_01 = line_5_23; } line_1_01 = vzipq_f32(line_1_01.val[0], line_1_01.val[1]); line_2_01 = vzipq_f32(line_2_01.val[0], line_2_01.val[1]); line_3_01 = vzipq_f32(line_3_01.val[0], line_3_01.val[1]); line_4_01 = vzipq_f32(line_4_01.val[0], line_4_01.val[1]); line_5_01 = vzipq_f32(line_5_01.val[0], line_5_01.val[1]); line1 = line_1_01.val[0]; line1_1 = line_1_01.val[1]; line2 = line_2_01.val[0]; line2_1 = line_2_01.val[1]; line3 = line_3_01.val[0]; line3_1 = line_3_01.val[1]; line4 = line_4_01.val[0]; line4_1 = 
line_4_01.val[1]; line5 = line_5_01.val[0]; line5_1 = line_5_01.val[1]; for (w = mid_block * 4; w < mid_w; w++) { float32x4_t line1_2 = vld1q_f32(input_1 + 8 + 2 * w); float32x4_t line2_2 = vld1q_f32(input_2 + 8 + 2 * w); float32x4_t line3_2 = vld1q_f32(input_3 + 8 + 2 * w); float32x4_t line4_2 = vld1q_f32(input_4 + 8 + 2 * w); float32x4_t line5_2 = vld1q_f32(input_5 + 8 + 2 * w); float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); tmp_4_0 = vmlaq_f32(tmp_4_0, line1_1, kernel_3_6); tmp_4_0 = vmlaq_f32(tmp_4_0, line2_1, kernel_10_13); tmp_4_0 = vmlaq_f32(tmp_4_0, line3_1, kernel_17_20); tmp_4_0 = vmlaq_f32(tmp_4_0, line4_1, kernel_24_27); tmp_4_0 = vmlaq_f32(tmp_4_0, line5_1, kernel_31_34); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); line1_1 = vextq_f32(line1_1, line1_2, 2); line2_1 = vextq_f32(line2_1, line2_2, 2); line3_1 = vextq_f32(line3_1, line3_2, 2); line4_1 = vextq_f32(line4_1, line4_2, 2); line5_1 = vextq_f32(line5_1, line5_2, 2); } /* bottom 0 right */ if (remain_w) { float32x4_t kernel_9_12 = vextq_f32(kernel_8_11, kernel_12_15, 1); float32x4_t kernel_23_26 = vextq_f32(kernel_20_23, kernel_24_27, 3); float32x4_t kernel_30_33 = vextq_f32(kernel_28_31, kernel_32_35, 2); line1 = vextq_f32(line1, line1_1, 1); line2 = vextq_f32(line2, line2_1, 1); line3 = vextq_f32(line3, line3_1, 1); line4 = vextq_f32(line4, line4_1, 1); line5 = vextq_f32(line5, line5_1, 1); line1_1 = vextq_f32(line1_1, line1_1, 1); line2_1 = vextq_f32(line2_1, 
line2_1, 1); line3_1 = vextq_f32(line3_1, line3_1, 1); line4_1 = vextq_f32(line4_1, line4_1, 1); line5_1 = vextq_f32(line5_1, line5_1, 1); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_4_7)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_high_f32(kernel_9_12)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_high_f32(kernel_16_19)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_high_f32(kernel_23_26)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_high_f32(kernel_30_33)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } line1 = vextq_f32(line1, line1_1, 2); line2 = vextq_f32(line2, line2_1, 2); line3 = vextq_f32(line3, line3_1, 2); line4 = vextq_f32(line4, line4_1, 2); line5 = vextq_f32(line5, line5_1, 2); { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0_3); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_7_10); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_14_17); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_21_24); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_28_31); tmp0 = vgetq_lane_f32(tmp_4_0, 0) + vgetq_lane_f32(tmp_4_0, 1) + vgetq_lane_f32(tmp_4_0, 2) + vgetq_lane_f32(tmp_4_0, 3) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } else { float32x4_t tmp_4_0 = vmulq_f32(line1, kernel_0012); tmp_4_0 = vmlaq_f32(tmp_4_0, line2, kernel_0789); tmp_4_0 = vmlaq_f32(tmp_4_0, line3, kernel_0141516); tmp_4_0 = vmlaq_f32(tmp_4_0, line4, kernel_0212223); tmp_4_0 = vmlaq_f32(tmp_4_0, line5, kernel_0282930); float32x2_t tmp_2_0 = vadd_f32(vget_low_f32(tmp_4_0), vget_high_f32(tmp_4_0)); tmp_2_0 = 
vmla_f32(tmp_2_0, vget_low_f32(line1_1), vget_low_f32(kernel_3_6)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line2_1), vget_low_f32(kernel_10_13)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line3_1), vget_low_f32(kernel_17_20)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line4_1), vget_low_f32(kernel_24_27)); tmp_2_0 = vmla_f32(tmp_2_0, vget_low_f32(line5_1), vget_low_f32(kernel_31_34)); tmp0 = vget_lane_f32(tmp_2_0, 0) + vget_lane_f32(tmp_2_0, 1) + bias_c; *output_buf++ = elem_activation(tmp0, activation); } } } } #endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
2937.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } } /* Calculate the m * m correlation matrix. */ #pragma omp for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % */ static inline void ConvertRGBToCMY(const Quantum red,const Quantum green, const Quantum blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToLab(const Quantum red,const Quantum green, const Quantum blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const Quantum red,const Quantum green, const Quantum blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const Quantum red,const Quantum green, const Quantum blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const Quantum red,const Quantum 
green, const Quantum blue,double *low_x,double *low_y,double *cap_Y) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); *low_x=X/(X+Y+Z); *low_y=Y/(X+Y+Z); *cap_Y=Y; } static void ConvertRGBToYPbPr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static void ConvertRGBToYDbDr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if 
  /* Continuation of RGBTransformImage(); the `if' this condition
     completes is on the previous source line. */
  (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  /* Dispatch on the target colorspace; direct-conversion cases return
     early, anything else falls through to the table-driven path below. */
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      MagickPixelPacket
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      GetMagickPixelPacket(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        MagickPixelPacket
          pixel;

        register IndexPacket
          *restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetMagickPixelPacket(image,q,indexes+x,&pixel);
          pixel.red=(MagickRealType) pixel.red;
          pixel.green=(MagickRealType) pixel.green;
          pixel.blue=(MagickRealType) pixel.blue;
          ConvertRGBToCMYK(&pixel);
          SetPixelPacket(image,&pixel,q,indexes+x);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->matte == MagickFalse ? ColorSeparationType :
        ColorSeparationMatteType;
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q)));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    /* All three-channel colorspaces share one loop with an inner
       per-pixel switch selecting the converter. */
    case CMYColorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      /*
        Transform image from sRGB to HSI.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            X,
            Y,
            Z;

          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum((MagickRealType) GetPixelRed(q));
          green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
          blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
          switch (colorspace)
          {
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          /* Converters return normalized [0,1] channels; rescale to the
             quantum range. */
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      /* Image properties may override the Cineon-style defaults. */
      value=GetImageProperty(image,"gamma");
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma");
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black");
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white");
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* NOTE: this loop counts down from columns to 1. */
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelRed(q)));
          green=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelGreen(q)));
          blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelBlue(q)));
          SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
          SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
          SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            blue,
            green,
            red;

          red=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelRed(q)));
          green=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelGreen(q)));
          blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
            GetPixelBlue(q)));
          SetPixelRed(q,red);
          SetPixelGreen(q,green);
          SetPixelBlue(q,blue);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601LumaColorspace:
    {
      /*
        Initialize Rec601 luma tables:

          G = 0.298839*R+0.586811*G+0.114350*B
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (0.298839*(double) i);
        y_map[i].y=(MagickRealType) (0.586811*(double) i);
        z_map[i].y=(MagickRealType) (0.114350*(double) i);
        x_map[i].z=(MagickRealType) (0.298839*(double) i);
        y_map[i].z=(MagickRealType) (0.586811*(double) i);
        z_map[i].z=(MagickRealType) (0.114350*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y =  0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr=  0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709LumaColorspace:
    {
      /*
        Initialize Rec709 luma tables:

          G = 0.212656*R+0.715158*G+0.072186*B
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        x_map[i].y=(MagickRealType) (0.212656*(double) i);
        y_map[i].y=(MagickRealType) (0.715158*(double) i);
        z_map[i].y=(MagickRealType) (0.072186*(double) i);
        x_map[i].z=(MagickRealType) (0.212656*(double) i);
        y_map[i].z=(MagickRealType) (0.715158*(double) i);
        z_map[i].z=(MagickRealType) (0.072186*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y =  0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr=  0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y =  0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2=  0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /* Piecewise map: linear below 1.8% of the range, gamma-like above. */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.003962014134275617*i;
        y_map[i].x=0.007778268551236748*i;
        z_map[i].x=0.001510600706713781*i;
        x_map[i].y=(-0.002426619775463276)*i;
        y_map[i].y=(-0.004763965913702149)*i;
        z_map[i].y=0.007190585689165425*i;
        x_map[i].z=0.006927257754597858*i;
        y_map[i].z=(-0.005800713697502058)*i;
        z_map[i].z=(-0.0011265440570958)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.2201118963486454*(1.099*i-0.099);
        y_map[i].x=0.4321260306242638*(1.099*i-0.099);
        z_map[i].x=0.08392226148409894*(1.099*i-0.099);
        x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099);
        y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099);
        z_map[i].y=0.3994769827314126*(1.099*i-0.099);
        x_map[i].z=0.3848476530332144*(1.099*i-0.099);
        y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099);
        z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) \
        magick_threads(image,image,1,1)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        y_map[i].x=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        x_map[i].y=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        z_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        MagickPixelPacket
          pixel;

        register ssize_t
          x;

        register PixelPacket
          *restrict q;

        register size_t
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(q)));
          /* 3x3 matrix multiply via the precomputed channel tables,
             plus the per-channel bias in primary_info. */
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            (MagickRealType) primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            (MagickRealType) primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            (MagickRealType) primary_info.z;
          SetPixelRed(q,ScaleMapToQuantum(pixel.red));
          SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
          SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_RGBTransformImage)
#endif
            proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      register size_t
        blue,
        green,
        red;

      /*
        Convert PseudoClass image.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        MagickPixelPacket
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace) == MagickFalse)
    return(MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r s p a c e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColorspace() sets the colorspace member of the Image structure.
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    pending_type;

  MagickBooleanType
    sync_status;

  /*
    Tagging only: record the colorspace and reset colorspace-derived
    metadata (rendering intent, gamma, chromaticity); no pixel data is
    transformed here.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged -- nothing to do */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  pending_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: the image type becomes grayscale; a linear gamma
        applies only to the Rec. 601/709 luminance intensity methods.
      */
      if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
          (image->intensity == Rec709LuminancePixelIntensityMethod))
        image->gamma=1.0;
      pending_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.0;  /* linear-light colorspaces */
    else
      {
        /*
          Any other colorspace: assume perceptual rendering intent and
          the sRGB primaries with a D65-style white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /*
    Synchronize the pixel cache before publishing the new image type.
  */
  sync_status=SyncImagePixelCache(image,&image->exception);
  image->type=pending_type;
  return(sync_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    transform_status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fast paths: already in the requested colorspace, or a source whose
    pixel values need no conversion to reach sRGB.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);
  if ((colorspace == sRGBColorspace) &&
      ((image->colorspace == Rec709LumaColorspace) ||
       ((image->colorspace == GRAYColorspace) && (image->gamma != 1.0))))
    return(MagickTrue);
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  /*
    A colorspace conversion invalidates any embedded color profile.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  /*
    Route through sRGB: decode the source colorspace first, then, if the
    target is not itself sRGB-compatible, encode the requested one.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  transform_status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    transform_status=TransformRGBImage(image,image->colorspace);
  if (transform_status == MagickFalse)
    return(transform_status);
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    transform_status=MagickFalse;
  return(transform_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/*
  Convert a CMY triplet (each component normalized to [0,1]) to
  quantum-scaled RGB; each channel is the complement of the matching
  CMY component.
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/*
  Convert LMS cone-response values to CIE XYZ with a fixed 3x3 matrix.
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/*
  Convert LMS to quantum-scaled RGB by way of XYZ.
*/
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Convert normalized CIE Luv (L,u,v each in [0,1]) to quantum-scaled RGB:
  first denormalize to the conventional Luv ranges (L: 0..100, u and v
  offset/scaled), then route through XYZ.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Round a real value to the nearest index of the 1389-entry YCC lookup
  table, clamping to [0,1388].
*/
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Convert a CMYK pixel to RGB in place; the index channel supplies the
  black (K) component that is folded into each of red, green and blue.
*/
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  pixel->red=((QuantumRange-(QuantumScale*pixel->red*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->green=((QuantumRange-(QuantumScale*pixel->green*
    (QuantumRange-pixel->index)+pixel->index)));
  pixel->blue=((QuantumRange-(QuantumScale*pixel->blue*
    (QuantumRange-pixel->index)+pixel->index)));
}

/*
  Convert normalized CIE Lab (L,a,b each in [0,1]) to quantum-scaled RGB:
  denormalize (L: 0..100, a and b centered on 0.5 and scaled by 255),
  then route through XYZ.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Convert an xyY chromaticity/luminance triplet to quantum-scaled RGB by
  way of XYZ.
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum
  *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  /* NOTE(review): divides by low_y -- presumably callers never pass a
     zero y chromaticity; confirm upstream. */
  X=cap_Y/low_y*low_x;
  Y=cap_Y;
  Z=cap_Y/low_y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Convert YPbPr (Y in [0,1], Pb and Pr centered on 0.5) to quantum-scaled
  RGB.  The coefficients are close to the standard BT.601 inverse matrix
  (1.402, 1.772) but slightly rescaled.
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/*
  YCbCr uses the same inverse matrix as YPbPr here; this simply forwards
  its arguments.
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/*
  Convert YDbDr (Y in [0,1], Db and Dr centered on 0.5) to quantum-scaled
  RGB with a fixed inverse matrix.
*/
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/*
  Convert YIQ (Y in [0,1], I and Q centered on 0.5) to quantum-scaled RGB
  with a fixed inverse matrix.
*/
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

/*
  Convert YUV (Y in [0,1], U and V centered on 0.5) to quantum-scaled RGB
  with a fixed inverse matrix.
*/
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)-
    0.5805003156565656797*(V-0.5)));
*blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)- 4.813762626262513e-04*(V-0.5))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 
0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 
0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 
0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 
0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 
0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; 
progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { 
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; 
} for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } 
image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert 
PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
simd-function_0.c
/* { dg-lto-do link } */
/* { dg-require-effective-target vect_simd_clones } */
/* { dg-require-effective-target avx2 } */
/* { dg-lto-options { { -fopenmp-simd -O3 -ffast-math -mavx2 -flto -flto-partition=max } } } */

/* LTO test: verifies that the OpenMP "declare simd" clones of my_mul
   survive -flto with -flto-partition=max (each function may land in
   its own partition) and still link when the caller is vectorized
   with AVX2.  The dg- directives above drive the test harness; do not
   edit them.  */

#define SIZE 4096

float x[SIZE];  /* shared output buffer written by both main and foo */

/* Scalar multiply.  "declare simd" asks the compiler to also emit
   SIMD clones; noinline forces real (possibly cross-partition) calls
   instead of folding the multiply into the callers.  */
#pragma omp declare simd
float __attribute__ ((noinline))
my_mul (float x, float y)
{
  return x * y;
}

/* SIMD loop calling my_mul; safelen(16) permits vector chunks of up
   to 16 iterations so the AVX2 clone of my_mul can be selected.  */
__attribute__ ((noinline)) int
foo ()
{
  int i = 0;
  #pragma omp simd safelen (16)
  for (i = 0; i < SIZE; i++)
    x[i] = my_mul ((float)i, 9932.3323);
  return (int)x[0];
}

int
main ()
{
  int i = 0;
  /* Scalar loop computing the same values foo recomputes.  */
  for (i = 0; i < SIZE; i++)
    x[i] = my_mul ((float) i, 9932.3323);
  foo ();
  /* my_mul(0, ...) == 0, so (int)x[0] gives exit status 0.  */
  return (int)x[0];
}
nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// NOTE: this is a DataRaceBench benchmark whose data race is
// INTENTIONAL -- the file exists to exercise race-detection tools,
// so the race must NOT be "fixed".
//
// Some threads may finish the for loop early and execute
// "error = a[9] + 1" in the single region while another thread may
// still be simultaneously executing the for worksharing region by
// writing to a[9], which may cause a data race.
// This is a good test for dynamic tools since the data race does not
// always happen at runtime.
//
// Liao, source paper: Ma Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
#include <stdio.h>
#include <assert.h>

int main()
{
  int i,error;           // error: racy snapshot of a[9]+1, see below
  int len = 1000;
  int a[1000], b=5;

  // a[i] = i
  for (i=0; i<len; i++)
    a[i]= i;

#pragma omp parallel shared(b, error)
  {
    // "nowait" removes the implied barrier at the end of the loop:
    // threads fall through to the single construct while other
    // threads may still be updating a[].
#pragma omp for nowait
    for(i = 0; i < len; i++)
      a[i] = b + a[i]*5;
    // Racy read: a[9] may or may not have been updated yet
    // (updated value would be 5 + 9*5 = 50).
#pragma omp single
    error = a[9] + 1;
  }
  printf ("error = %d\n", error);
//  assert (error==51);  // would hold only if the single ran after the update of a[9]
  return 0;
}
myblas.h
#include<omp.h> #include<math.h> // y = alpha * x + y void axpy(size_t N, double alpha, const double* x, double* y){ #pragma omp parallel for for(size_t i=0; i<N; i++){ y[i] = alpha * x[i] + y[i]; } } // z = alpha * x + y void axpyz(size_t N, double alpha, const double* x, const double* y, double* z){ #pragma omp parallel for for(size_t i=0; i<N; i++){ z[i] = alpha * x[i] + y[i]; } } // x = x + alpha * y void xpay(size_t N, double alpha, double* x, const double* y){ #pragma omp parallel for for(size_t i=0; i<N; i++){ x[i] = x[i] + alpha * y[i]; } } // x = alpha * x void scal(size_t N, double alpha, double* x){ #pragma omp parallel for for(size_t i=0; i<N; i++){ x[i] = alpha * x[i]; } } // inner product double dot(size_t N, const double* x, const double* y){ double ans=0; #pragma omp parallel for reduction(+:ans) for(size_t i=0; i<N; i++){ ans += x[i] * y[i]; } return ans; } // 2nrm double nrm2(size_t N, const double* x){ return sqrt(dot(N, x, x)); } // copy (y=x) void copy(size_t N, const double* x, double* y){ #pragma omp parallel for for(size_t i=0; i<N; i++){ y[i] = x[i]; } } // matvec:y=Ax void matvec(int N, const int* row_ptr, const int* col_ind, const double* val, const double* x, double* y) { #pragma omp parallel for for(size_t i = 0; i < N; i++){ y[i] = 0; for(size_t j = row_ptr[i]; j < row_ptr[i+1]; j++){ y[i] += x[col_ind[j]] * val[j]; } } }
issue_001.c
#include <stdio.h> #include "assert.h" #include <unistd.h> // 920 fails #define TRIALS 600 //#919 // 6000 fails #define N 64*5000 int main() { int fail = 0; double A[N], B[N], C[N]; for (int i = 0; i < N; i++) { A[i] = 0.0; B[i] = 0.0; C[i] = 1.0; } int nte = 32; int tl = 64; int blockSize = tl; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(nte) thread_limit(tl) { #pragma omp distribute for(int j = 0 ; j < N ; j += blockSize) { #pragma omp parallel for for(int i = j ; i < j+blockSize; i++) { A[i] += B[i] + C[i]; } } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; break; } } if(fail) { printf("Failed\n"); return 1; } else { printf("Succeeded\n"); return 0; } }
divsufsort.c
/*
 * divsufsort.c for libdivsufsort
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif


/*- Private Functions -*/

/* Sorts suffixes of type B*.
 *
 * Fills bucket_A/bucket_B with character-pair counts and bucket
 * boundaries, sorts all type-B* suffixes of T into their final
 * relative order in SA, and returns m, the number of B* suffixes.
 * With _OPENMP, the per-bucket sssort calls are distributed across
 * threads, each with its own slice of scratch buffer. */
static saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters
     of each type A, B and B* suffix. Moreover, store the beginning
     position of all type B* suffixes into the array SA.
     The scan runs right-to-left: a run of non-increasing characters is
     type A; the first position where T[i] < T[i+1] after such a run is
     type B*, and what follows (while non-decreasing) is type B. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;  /* m = number of type B* suffixes recorded above */
  /* note: A type B* suffix is lexicographically smaller than a type B
     suffix that begins with the same first two characters. */

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters.
       PAb holds the B* start positions; ISAb is scratch for ranks. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;  /* per-thread slice of the scratch area */
      k = 0;
      for(;;) {
        /* Under the lock, claim the next non-trivial bucket [k, l)
           by walking (c0, c1) downward; l == 0 signals no work left. */
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    /* Sort each (c0, c1) bucket of B* substrings, highest pair first. */
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings.
       Negated entries encode runs of equal substrings sharing a rank. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);

    /* Set the sorted order of type B* suffixes.
       Re-scan T right-to-left to revisit each B* position; a negated
       value (~t) marks a B* suffix immediately followed by another. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */

        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}

/* Constructs the suffix array by using the sorted order of type B*
   suffixes.  First induces the order of all type B suffixes from the
   B* order (right-to-left scan per bucket), then induces the type A
   suffixes in a single left-to-right scan.  Negated SA entries are
   transient markers that are flipped back as the scans pass them. */
static void
construct_SA(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using the
       sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using the sorted order of type B
     suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      assert(s < 0);
      *i = ~s;
    }
  }
}

#if 0
/* Constructs the burrows-wheeler transformed string directly
   by using the sorted order of type B* suffixes.
 */
static saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using the
       sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          /* Unlike construct_SA, the visited slot stores the negated
             preceding character (the BWT output) rather than ~s. */
          c0 = T[--s];
          *j = ~((saidx_t)c0);
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
#ifndef NDEBUG
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }

  /* Construct the BWTed string by using the sorted order of type B
     suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      orig = i;  /* position of the original string in the BWT */
    }
  }

  return orig - SA;
}
#endif

/*---------------------------------------------------------------------------*/

/**
 * Initialize suffix array context
 *
 * Allocates the two bucket arrays used by the sorting passes.
 * On any allocation failure, partially allocated state is released
 * via divsufsort_destroy.
 *
 * @return 0 for success, or non-zero in case of an error
 */
int divsufsort_init(divsufsort_ctx_t *ctx) {
  ctx->bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  ctx->bucket_B = NULL;

  if (ctx->bucket_A) {
    ctx->bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));
    if (ctx->bucket_B)
      return 0;
  }

  divsufsort_destroy(ctx);
  return -1;
}

/**
 * Destroy suffix array context
 *
 * Frees both bucket arrays (if present) and nulls the pointers, so
 * repeated calls are safe.
 *
 * @param ctx suffix array context to destroy
 */
void divsufsort_destroy(divsufsort_ctx_t *ctx) {
  if (ctx->bucket_B) {
    free(ctx->bucket_B);
    ctx->bucket_B = NULL;
  }
  if (ctx->bucket_A) {
    free(ctx->bucket_A);
    ctx->bucket_A = NULL;
  }
}

/*- Function -*/

/* Builds the suffix array of T[0..n-1] into SA using the context's
   bucket arrays.  Lengths 0..2 are handled directly; otherwise the
   two-stage B*-suffix sort is run.  Returns 0 on success, -1 for bad
   arguments, -2 if the context buckets were not allocated. */
saint_t
divsufsort_build_array(divsufsort_ctx_t *ctx, const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t m;
  saint_t err = 0;

  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  else if(n == 0) { return 0; }
  else if(n == 1) { SA[0] = 0; return 0; }
  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }

  /* Suffixsort. */
  if((ctx->bucket_A != NULL) && (ctx->bucket_B != NULL)) {
    m = sort_typeBstar(T, SA, ctx->bucket_A, ctx->bucket_B, n);
    construct_SA(T, SA, ctx->bucket_A, ctx->bucket_B, n, m);
  } else {
    err = -2;
  }

  return err;
}

#if 0
/* Computes the Burrows-Wheeler transform of T into U; A (if non-NULL)
   supplies working space of n+1 entries.  Returns the primary index,
   n for trivial inputs, or a negative code on error. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t m, pidx, i;

  /* Check arguments. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }

  if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Burrows-Wheeler Transform. */
  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }

  return pidx;
}

const char *
divsufsort_version(void) {
  return PROJECT_VERSION_FULL;
}
#endif
par_mgr.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> num_coarse_indexes) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 
/* Tail of hypre_MGRCreate(): remaining defaults for the freshly allocated
 * MGR data object.  NOTE(review): the `1.0e-7;` below is the right-hand side
 * of an assignment whose left-hand side lies above this chunk (presumably a
 * convergence tolerance field) -- confirm against the full file. */
1.0e-7;
(mgr_data -> relax_type) = 0;
(mgr_data -> relax_order) = 1;
(mgr_data -> interp_type) = 2;
(mgr_data -> restrict_type) = 0;
(mgr_data -> num_relax_sweeps) = 1;
(mgr_data -> relax_weight) = 1.0;
(mgr_data -> logging) = 0;
(mgr_data -> print_level) = 0;
(mgr_data -> l1_norms) = NULL;
(mgr_data -> reserved_coarse_size) = 0;
(mgr_data -> reserved_coarse_indexes) = NULL;
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
(mgr_data -> diaginv) = NULL;
(mgr_data -> global_smooth_iters) = 1;
(mgr_data -> global_smooth_type) = 0;
(mgr_data -> set_non_Cpoints_to_F) = 0;
(mgr_data -> Frelax_method) = 0;
(mgr_data -> FrelaxVcycleData) = NULL;
(mgr_data -> max_local_lvls) = 10;
(mgr_data -> print_coarse_system) = 0;
/* Hand the opaque solver object back to the caller. */
return (void *) mgr_data;
}

/*--------------------------------------------------------------------------
 * hypre_MGRDestroy
 *
 * Free every allocation owned by an MGR solver object created by
 * hypre_MGRCreate() (and populated by setup).  Each sub-object is freed
 * with the matching hypre destructor (hypre_TFree for plain arrays,
 * hypre_ParVectorDestroy / hypre_ParCSRMatrixDestroy for parallel
 * objects), and most pointers are NULLed afterwards so a double Destroy
 * is harmless for those fields.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRDestroy( void *data )
{
   hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;
   HYPRE_Int i;
   HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);

   /* Per-level block CF-marker arrays (sized by max_num_coarse_levels). */
   if ((mgr_data -> block_cf_marker))
   {
      for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
         }
      }
      hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if(mgr_data -> block_num_coarse_indexes)
   {
      hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* Final residual vector and per-iteration relative residual norms. */
   if((mgr_data -> residual))
   {
      hypre_ParVectorDestroy( (mgr_data -> residual) );
      (mgr_data -> residual) = NULL;
   }
   if((mgr_data -> rel_res_norms))
   {
      hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST);
      (mgr_data -> rel_res_norms) = NULL;
   }

   /* Temporary work vectors used during the solve phase. */
   if((mgr_data -> Vtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
      (mgr_data -> Vtemp) = NULL;
   }
   if((mgr_data -> Ztemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
      (mgr_data -> Ztemp) = NULL;
   }
   if((mgr_data -> Utemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Utemp) );
      (mgr_data -> Utemp) = NULL;
   }
   if((mgr_data -> Ftemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
      (mgr_data -> Ftemp) = NULL;
   }

   /* Coarse-grid solver: destroyed only when MGR created it itself
    * (use_default_cgrid_solver); a user-supplied solver is the user's
    * to free.  NOTE(review): the pointer is NULLed only inside this
    * branch -- a user-supplied solver pointer is left dangling after
    * Destroy; confirm callers do not reuse it. */
   if((mgr_data -> use_default_cgrid_solver))
   {
      if((mgr_data -> coarse_grid_solver))
         hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
      (mgr_data -> coarse_grid_solver) = NULL;
   }

   /* Per-level l1 norms used by the smoothers. */
   if ((mgr_data -> l1_norms))
   {
      for (i=0; i < (num_coarse_levels); i++)
         if ((mgr_data -> l1_norms)[i])
            hypre_TFree((mgr_data -> l1_norms)[i], HYPRE_MEMORY_HOST);
      hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
   }

   /* Per-level coarse index lists. */
   if ((mgr_data -> coarse_indices_lvls))
   {
      for (i=0; i < (num_coarse_levels); i++)
         if ((mgr_data -> coarse_indices_lvls)[i])
            hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
      hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
   }

   /* Level hierarchy: matrices, transfer operators, CF markers, and the
    * level-indexed F/U work vectors.  NOTE(review): the guard only checks
    * A/P/RT/CF_marker arrays, yet the loop dereferences F_array/U_array
    * unconditionally -- assumes setup allocates all of them together;
    * confirm against the setup routine. */
   if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array)
   {
      for (i=1; i < num_coarse_levels+1; i++)
      {
         hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
         hypre_ParVectorDestroy((mgr_data -> U_array)[i]);

         if ((mgr_data -> P_array)[i-1])
            hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]);

         if ((mgr_data -> RT_array)[i-1])
            hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]);

         hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST);
      }
      /* A_array[0] is the user's fine-grid matrix: start at i=1. */
      for (i=1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_array)[i])
            hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
      }
   }

   /* Now the top-level arrays of pointers themselves. */
   if((mgr_data -> F_array))
   {
      hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
      (mgr_data -> F_array) = NULL;
   }
   if((mgr_data -> U_array))
   {
      hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
      (mgr_data -> U_array) = NULL;
   }
   if((mgr_data -> A_array))
   {
      hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
      (mgr_data -> A_array) = NULL;
   }
   if((mgr_data -> P_array))
   {
      hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
      (mgr_data -> P_array) = NULL;
   }
   if((mgr_data -> RT_array))
   {
      hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
      (mgr_data -> RT_array) = NULL;
   }
   if((mgr_data -> CF_marker_array))
   {
      hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
      (mgr_data -> CF_marker_array) = NULL;
   }
   if((mgr_data -> reserved_Cpoint_local_indexes))
   {
      hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_Cpoint_local_indexes) = NULL;
   }

   /* Per-level data for the V-cycle F-relaxation option. */
   if (mgr_data -> FrelaxVcycleData)
   {
      for (i = 0; i < num_coarse_levels; i++)
      {
         if ((mgr_data -> FrelaxVcycleData)[i])
         {
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      mgr_data -> FrelaxVcycleData = NULL;
   }

   /* Indexes of nodes reserved to stay coarse throughout the hierarchy. */
   if(mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* Coarsest-level matrix (the Galerkin product RAP). */
   if ((mgr_data -> RAP))
      hypre_ParCSRMatrixDestroy((mgr_data -> RAP));

   /* Inverted block diagonal used by the global smoother. */
   if ((mgr_data -> diaginv))
      hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);

   /* Finally, the MGR data object itself. */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRCreateFrelaxVcycleData
 *
 * Allocate and zero a minimal hypre_ParAMGData object used to drive the
 * V-cycle F-relaxation inside MGR.  All array/work-space fields start as
 * NULL; hierarchy fields are set at the start of the next chunk.
 *--------------------------------------------------------------------------*/
void *
hypre_MGRCreateFrelaxVcycleData()
{
   hypre_ParAMGData  *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);

   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata)  = NULL;
   hypre_ParAMGDataAMat(vdata)  = NULL;
   hypre_ParAMGDataBVec(vdata)  = NULL;
   hypre_ParAMGDataZtemp(vdata)  = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   /* Remaining fields of the F-relaxation AMG data object. */
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   return (void *) vdata;
}

/*--------------------------------------------------------------------------
 * hypre_MGRDestroyFrelaxVcycleData
 *
 * Free the AMG-style hierarchy owned by an F-relaxation V-cycle data
 * object created by hypre_MGRCreateFrelaxVcycleData().  Vtemp/Ztemp are
 * deliberately NOT destroyed here: they alias work vectors owned by the
 * enclosing MGR object (see the commented-out calls below).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);

   /* Level 0 objects (fine grid) are owned elsewhere; free levels 1.. */
   for (i=1; i < num_levels; i++)
   {
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);

      if (hypre_ParAMGDataAArray(vdata)[i])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);

      if (hypre_ParAMGDataPArray(vdata)[i-1])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);

      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
   }

   /* see comments in par_coarsen.c regarding special case for CF_marker */
   if (num_levels == 1)
   {
      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
   }

   /* Points to vtemp of mgr_data, which is already destroyed */
   // hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));

   /* Arrays of per-level pointers. */
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);

   /* Points to ztemp of mgr_data, which is already destroyed */
   /*
   if (hypre_ParAMGDataZtemp(vdata))
      hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
   */

   /* Scratch buffers for the coarsest-level direct solve. */
   if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
   if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
   if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);

   /* Sub-communicator created for the coarsest-level solve, if any. */
   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }
   hypre_TFree(vdata, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRSetReductionLevelCpoints
 *
 * Set C-point variables for each reduction level.
 * Currently not implemented (only stores the caller's pointers; no copies
 * are taken, so the arrays must outlive the solver).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
                                   HYPRE_Int nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int **level_coarse_indexes)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_coarse_levels) = nlevels;
   (mgr_data -> num_coarse_per_level) = num_coarse_points;
   (mgr_data -> level_coarse_indexes) = level_coarse_indexes;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRSetNonCpointsToFpoints
 *
 * Set whether non-coarse points on each level should be explicitly
 * tagged as F-points.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRSetCpointsByBlock
 *
 * Initialize / set block data information: for each of max_num_levels
 * reduction levels, record which of the block_size unknowns of a block
 * are coarse (CMRK) vs fine (FMRK).  Deep-copies the caller's index
 * arrays; any previously stored block data is freed first.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
                            HYPRE_Int block_size,
                            HYPRE_Int max_num_levels,
                            HYPRE_Int *block_num_coarse_points,
                            HYPRE_Int **block_coarse_indexes)
{
   HYPRE_Int i,j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* free block cf_marker data if not previously destroyed */
   if((mgr_data -> block_cf_marker) != NULL)
   {
      for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* Default every unknown of the block to fine.
       * NOTE(review): memset is a byte-wise fill, so this only yields
       * FMRK in each HYPRE_Int if FMRK is 0 or -1 -- confirm FMRK's
       * definition in the MGR header. */
      memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
   }
   /* Then mark the caller-specified coarse positions per level. */
   for (i = 0; i < max_num_levels; i++)
   {
      for(j=0; j<block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points (deep copy) */
   if(max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for(i=0; i<max_num_levels; i++)
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
   }

   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRSetReservedCoarseNodes
 *
 * Set number of points that remain part of the coarse grid throughout
 * the hierarchy.  Deep-copies reserved_cpt_index into a HYPRE_BigInt
 * array owned by the solver; a previously stored array is freed first.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_Int *reserved_cpt_index)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_BigInt *reserved_coarse_indexes = NULL;
   HYPRE_Int i;

   if (!mgr_data)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n");
      return hypre_error_flag;
   }

   if(reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* free data not previously destroyed */
   if((mgr_data -> reserved_coarse_indexes))
   {
      hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* set reserved coarse nodes */
   if(reserved_coarse_size > 0)
   {
      reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for(i=0; i<reserved_coarse_size; i++)
         reserved_coarse_indexes[i] = reserved_cpt_index[i];
   }
   (mgr_data -> reserved_coarse_size) = reserved_coarse_size;
   (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRCoarsen
 *
 * Set CF marker array: produce the C/F splitting for one MGR level.
 * If cflag is nonzero (last level) the splitting is exactly the fixed
 * coarse set; otherwise BoomerAMG coarsening runs first and the fixed
 * coarse points are then forced to CMRK, with all remaining points
 * normalized to FMRK.  *CF_marker is (re)allocated and returned to the
 * caller, who owns it.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 HYPRE_Int **CF_marker,
                 HYPRE_Int cflag)
{
   HYPRE_Int *cf_marker, i, row, nc;
   HYPRE_Int *cindexes = fixed_coarse_indexes;
   HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if(cflag)
   {
      if(*CF_marker != NULL)
      {
         hypre_TFree(*CF_marker, HYPRE_MEMORY_HOST);
      }
      cf_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST);
      /* NOTE(review): byte-wise fill -- only correct if FMRK is 0 or -1;
       * confirm FMRK's definition. */
      memset(cf_marker, FMRK, nloc*sizeof(HYPRE_Int));

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for(i = 0; i < nc; i++)
      {
         cf_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels. NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes.
       */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &cf_marker);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for(i = 0; i < nc; i++)
      {
         cf_marker[cindexes[i]] = CMRK;
      }

      /* set F-points to FMRK. This is necessary since the different
       * coarsening schemes differentiate between types of F-points
       * (example Ruge coarsening). We do not need that distinction here. */
      for (row = 0; row <nloc; row++)
      {
         if(cf_marker[row] == CMRK) continue;
         cf_marker[row] = FMRK;
      }
#if 0
      /* IMPORTANT: Update coarse_indexes array to define the positions of
       * the fixed coarse points in the next level.
       * (Disabled: references index_i, S_CMRK and last_level, which are
       * not declared in the active code.) */
      nc = 0;
      index_i = 0;
      for (row = 0; row <nloc; row++)
      {
         /* loop through new c-points */
         if(cf_marker[row] == CMRK) nc++;
         else if(cf_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set.
             * Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different
          * coarsening schemes differentiate between types of F-points
          * (example Ruge coarsening). We do not need that distinction here. */
         else
         {
            cf_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if( nc == fixed_coarse_size)
         last_level = 1;
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }

   /* set CF_marker */
   *CF_marker = cf_marker;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_MGRBuildP
 *
 * Interpolation for MGR - Adapted from BoomerAMGBuildInterp.
 * Builds the prolongation matrix P: identity on C-points; on F-points the
 * weight to a C-neighbor depends on `method` (0: zero weights, 1: -a_fc,
 * 2: -a_fc/a_ff, i.e. a Jacobi-style approximation of A_{ff}^{-1}A_{fc}).
 * The result is returned in *P_ptr (caller owns it).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix   *A,
                 HYPRE_Int            *CF_marker,
                 HYPRE_BigInt         *num_cpts_global,
                 HYPRE_Int             method,
                 HYPRE_Int             debug_flag,
                 hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Local (diag) and off-processor (offd) parts of A in CSR form. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real *a_diag;           /* reciprocal of A's diagonal, per row */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;

   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;

   HYPRE_Int P_diag_size, P_offd_size;

   HYPRE_Int *P_marker, *P_marker_offd;

   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   // HYPRE_Int jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int jj_end_row,jj_end_row_offd;

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int
   num_cols_P_offd;
   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;

   HYPRE_Real one  = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;     /* per-thread row-range bookkeeping */

   HYPRE_Int *int_buf_data;

   HYPRE_Real wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Last rank owns the global C-point count; broadcast it to everyone. */
   //my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Pack the CF markers of rows we send, exchange, receive into
    * CF_marker_offd (one entry per off-processor column of A). */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Manual row partitioning across num_threads (first `rest` chunks get
    * one extra row); each chunk accumulates its own nnz/coarse counters. */
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is the approximation of
          *  A_{ff}^{-1}A_{fc}: one entry per C-neighbor of row i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays (after prefix-summing the per-thread counters so each
    * thread knows its offset in the shared CSR arrays).
    *-----------------------------------------------------------------------*/

   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Turn per-thread local coarse indices into process-wide local indices
    * by shifting each thread's range by the preceding threads' totals. */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /* (Disabled: exchange of global fine_to_coarse indices; the off-diag
    * columns of P are instead stored as local indices and compressed at
    * the end of this routine.)
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }
   */

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

   /* Cache 1/a_ii for every local row (used by method 2 weights). */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i==i1 )  /* diagonal of A only */
         {
            a_diag[i] = 1.0/A_diag_data[jj];
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Second pass: same thread partitioning as pass one; each chunk starts
    * writing at the offsets computed by the prefix sums above. */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  if(method == 0)
                  {
                     P_diag_data[jj_counter] = 0.0;
                  }
                  else if (method == 1)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj];
                  }
                  else if (method == 2)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
                  }
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/

                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /* Stored as a local offd column index here; remapped to
                      * the compressed P column space after the loop. */
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     P_offd_j[jj_counter_offd] = i1;
                     if(method == 0)
                     {
                        P_offd_data[jj_counter_offd] = 0.0;
                     }
                     else if (method == 1)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj];
                     }
                     else if (method == 2)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
                     }
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   /* Assemble the ParCSR matrix P from the arrays built above; P takes
    * ownership of the diag/offd CSR arrays. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress the offd column space of P: find which of A's offd columns
    * actually appear, build the map, then renumber P_offd_j. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] =
            /* Remap each local offd index to its position in the compressed
             * P offd column space. */
            hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore any -3 markers to the standard F-point value. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   /* Build P's communication package from A's. */
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   /* Release the scratch arrays; P now owns its CSR data. */
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return(0);
}

/*--------------------------------------------------------------------------
 * hypre_MGRBuildPDRS
 *
 * Interpolation for MGR - Dynamic Row Sum method.
 * (Definition continues past this chunk; only the header and local
 * declarations are visible here.)
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix   *A,
                    HYPRE_Int            *CF_marker,
                    HYPRE_BigInt         *num_cpts_global,
                    HYPRE_Int             blk_size,
                    HYPRE_Int             reserved_coarse_size,
                    HYPRE_Int             debug_flag,
                    hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Local (diag) and off-processor (offd) parts of A in CSR form. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag;

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;

   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;

   HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = 
start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* First (counting) pass: each thread j handles the contiguous row range
      [ns,ne) and counts how many nonzeros its rows of P will have (diag and
      offd parts counted separately), while assigning thread-local coarse
      indices through coarse_counter[j]. */
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      /* the first `rest` threads each take one extra row */
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          * (one P entry per C-point neighbor of row i)
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
         /*--------------------------------------------------------------------
          * Set up the indexes for the DRS method
          *--------------------------------------------------------------------*/
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* prefix-sum the per-thread counters so thread j's counts become global
      offsets; the last entry is then the total nonzero count */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* shift each thread's local coarse indices by the number of coarse points
      owned by all earlier threads */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* cache reciprocal diagonal entries: a_diag[i] = 1/A_ii */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i==i1 ) /* diagonal of A only */
         {
            a_diag[i] = 1.0/A_diag_data[jj];
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Second (fill) pass: each thread fills the rows of P in its range,
      starting from the offsets computed in the counting pass. */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P: weight -A_ic / A_ii for each C-neighbor c */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   /* assemble the ParCSR interpolation operator from the filled arrays */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   num_cols_P_offd = 0;

   /* compress P_offd's column space down to the columns actually used */
   if (P_offd_size)
   {
      P_marker =
                 hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* mark each A_offd column that P actually references */
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      /* tmp_map_offd[new] = old A_offd column index of each retained column */
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      /* NOTE(review): col_map_offd_P is attached to P below but never filled
         with global column indices in this routine - presumably
         hypre_GetCommPkgRTFromCommPkgA fixes it up via tmp_map_offd; confirm. */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* renumber P_offd_j from A_offd's column space to the compressed space */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* restore CF markers that were temporarily flagged -3 */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return(0);
}

/* Setup interpolation operator.
 *
 * A           : fine-grid matrix
 * CF_marker   : C/F splitting (>= 0 marks a C-point)
 * S           : strength matrix (used only by the classical-interpolation path)
 * last_level  : nonzero on the last MGR level, which activates `method`
 * method      : < 3 selects a hypre_MGRBuildP variant; otherwise classical
 *               modified interpolation via BoomerAMG
 * numsweeps   : currently unused - the Jacobi-improvement sweeps are
 *               commented out below
 * P           : output interpolation operator
 */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     hypre_ParCSRMatrix *S,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int num_functions,
                     HYPRE_Int *dof_func,
                     HYPRE_Int debug_flag,
                     HYPRE_Real trunc_factor,
                     HYPRE_Int max_elmts,
                     HYPRE_Int *col_offd_S_to_A,
                     hypre_ParCSRMatrix **P,
                     HYPRE_Int last_level,
                     HYPRE_Int method,
                     HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   // HYPRE_Real jac_trunc_threshold = trunc_factor;
   // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;

   /* Build interpolation operator using (hypre default) */
   if(!last_level)
   {
      hypre_MGRBuildP( A,CF_marker,num_cpts_global,2,debug_flag,&P_ptr);
   }
   /* Do Jacobi interpolation for last level */
   else
   {
      if (method <3)
      {
         hypre_MGRBuildP( A,CF_marker,num_cpts_global,method,debug_flag,&P_ptr);
         /* Could do a few sweeps of Jacobi to further improve P */
         //for(i=0; i<numsweeps; i++)
         //   hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
      }
      else
      {
         /* Classical modified interpolation */
         hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag,
                                    trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr);

         /* Do k steps of Jacobi build W for P = [-W I].
          * Note that BoomerAMGJacobiInterp assumes you have some initial P,
          * hence we need to initialize P as above, before calling this routine.
          * If numsweeps = 0, the following step is skipped and P is returned as is.
          * Looping here is equivalent to improving P by Jacobi interpolation */
         // for(i=0; i<numsweeps; i++)
         //    hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker,
         //                                0, jac_trunc_threshold,
         //                                jac_trunc_threshold_minus );
      }
   }
   /* set pointer to P */
   *P = P_ptr;

   return hypre_error_flag;
}

/* Invert a dense 4x4 matrix stored row-major in a[0..15], in place,
 * using the explicit adjugate/cofactor formulas: a <- adj(a) / det(a).
 * NOTE(review): the singularity check below is commented out, so a (near-)
 * singular input silently produces Inf/NaN entries - confirm callers
 * guarantee a nonsingular block. */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   const HYPRE_Real a11 = a[0],  a12 = a[1],  a13 = a[2],  a14 = a[3];
   const HYPRE_Real a21 = a[4],  a22 = a[5],  a23 = a[6],  a24 = a[7];
   const HYPRE_Real a31 = a[8],  a32 = a[9],  a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];

   /* cofactor matrix, already transposed (M[i][j] is the (j,i) cofactor) */
   const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
   const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
   const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
   const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
   const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
   const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
   const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
   const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
   const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
   const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
   const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
   const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
   const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
   const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
   const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
   const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;

   /* Laplace expansion of the determinant along the first row */
   const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41;
   HYPRE_Real det_inv;

   //if ( fabs(det) < 1e-22 ) {
      /* there should be no print statements that can't be turned off. Is this an error? */
      //hypre_fprintf(stderr, "### WARNING: Matrix is nearly singular! det = %e\n", det);
      /*
      printf("##----------------------------------------------\n");
      printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
      printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
      printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
      printf("##----------------------------------------------\n");
      getchar();
      */
   //}

   det_inv = 1.0/det;

   a[0] = M11*det_inv;  a[1] = M12*det_inv;  a[2] = M13*det_inv;  a[3] = M14*det_inv;
   a[4] = M21*det_inv;  a[5] = M22*det_inv;  a[6] = M23*det_inv;  a[7] = M24*det_inv;
   a[8] = M31*det_inv;  a[9] = M32*det_inv;  a[10] = M33*det_inv; a[11] = M34*det_inv;
   a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv;
}

/* In-place inversion of a dense n x n row-major matrix by Gauss-Jordan
 * elimination WITHOUT pivoting; dispatches to the closed-form 4x4 routine
 * when n == 4.
 * NOTE(review): the zero-pivot guard below is commented out, so a (near-)zero
 * diagonal entry silently yields Inf/NaN - confirm callers guarantee
 * well-conditioned diagonal blocks. */
void hypre_blas_mat_inv(HYPRE_Real *a,
                        HYPRE_Int n)
{
   HYPRE_Int i,j,k,l,u,kn,in;
   HYPRE_Real alinv;

   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k=0; k<n; ++k)
      {
         kn = k*n;
         l  = kn+k;

         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0/a[l];
         a[l] = alinv;

         /* scale pivot row k (skipping the pivot itself) */
         for (j=0; j<k; ++j)
         {
            u = kn+j; a[u] *= alinv;
         }
         for (j=k+1; j<n; ++j)
         {
            u = kn+j; a[u] *= alinv;
         }

         /* eliminate column k from every other row */
         for (i=0; i<k; ++i)
         {
            in = i*n;
            for (j=0; j<n; ++j)
               if (j!=k)
               {
                  u = in+j; a[u] -= a[in+k]*a[kn+j];
               } // end if (j!=k)
         }
         for (i=k+1; i<n; ++i)
         {
            in = i*n;
            for (j=0; j<n; ++j)
               if (j!=k)
               {
                  u = in+j; a[u] -= a[in+k]*a[kn+j];
               } // end if (j!=k)
         }

         /* update the pivot column */
         for (i=0; i<k; ++i)
         {
            u=i*n+k; a[u] *=
 -alinv;
         }
         for (i=k+1; i<n; ++i)
         {
            u=i*n+k; a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}

/* Build B, a block-Jacobi scaling matrix for A: the block diagonal of A
 * (blocks of size mgr_data->block_size) is extracted and inverted, and the
 * inverses are assembled into the diagonal part of the ParCSR matrix B.
 * B's off-diagonal part is left empty. Returns block_scaling_error (0). */
HYPRE_Int
hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix **B_ptr,
                           void *mgr_vdata,
                           HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;

   HYPRE_Int i,ii;
   HYPRE_Int j,jj;
   HYPRE_Int k;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size,inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */

   HYPRE_Int bidx,bidxm1,bidxp1;
   HYPRE_Real * diaginv;

   const HYPRE_Int nb2 = blk_size*blk_size;

   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   //printf("n = %d\n",n);

   /* NOTE(review): my_id is always in [0, num_procs-1], so this branch is
      dead. The reserved coarse nodes appear to live on the last rank, so
      this was presumably meant to be my_id == num_procs - 1. Confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size*n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;

   //printf("inv_size = %d\n",inv_size);
   /* also (re)builds the inverted blocks cached on the MGR data structure */
   hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv));

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/

   B_diag_i    = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   B_diag_j    = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   B_diag_i[n] = inv_size;

   //B_offd_i    = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j    = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   /* scratch buffer holding one blk_size x blk_size block at a time.
      NOTE(review): never freed before return - memory leak; confirm. */
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0;i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;

      for (k = 0;k < blk_size; k++)
      {
         /* zero the scratch block, then gather row k of block i from A_diag */
         for (j = 0;j < blk_size; j++)
         {
            bidx = k*blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k*blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */

      /* invert the block in place and scatter it into B's diagonal part */
      hypre_blas_mat_inv(diaginv, blk_size);

      for (k = 0;k < blk_size; k++)
      {
         B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size;
         //B_offd_i[i*nb2+k] = 0;

         for (j = 0;j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            B_diag_j[bidx] = i*blk_size + j;
            B_diag_data[bidx] = diaginv[k*blk_size + j];
         }
      }
   }

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixGlobalNumCols(A),
                                hypre_ParCSRMatrixRowStarts(A),
                                hypre_ParCSRMatrixColStarts(A),
                                0,
                                inv_size,
                                0);
   //printf("After create\n");
   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;
   B_offd = hypre_ParCSRMatrixOffd(B);
   /* B has no off-diagonal part */
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;
   /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */

   *B_ptr = B;

   return(block_scaling_error);
}

/* One sweep of block-Jacobi relaxation: for each diagonal block b,
 * u_b += D_b^{-1} (f - A u)_b, where diaginv holds the pre-inverted
 * blk_size x blk_size diagonal blocks (nb2 entries per block).
 * NOTE(review): blk_size is declared HYPRE_Real but used throughout as an
 * integer loop bound / index term - presumably should be HYPRE_Int; confirm.
 * NOTE(review): the trailing left_size rows are not relaxed here. */
HYPRE_Int
hypre_block_jacobi (hypre_ParCSRMatrix *A,
                    hypre_ParVector *f,
                    hypre_ParVector *u,
                    HYPRE_Real blk_size,
                    HYPRE_Int n_block,
                    HYPRE_Int left_size,
                    HYPRE_Real *diaginv,
                    hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx,bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;

   HYPRE_Real *res;            /* per-row block residual r = f - A u */

   const HYPRE_Int nb2 = blk_size*blk_size;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   /* start exchanging off-processor entries of u needed for A_offd * u */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   /* finish the halo exchange before touching Vext_data */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0;i < n_block; i++)
   {
      /* residual of the blk_size rows of block i, against the OLD iterate */
      for (j = 0;j < blk_size; j++)
      {
         bidx = i*blk_size +j;
         res[j] = f_data[bidx];
         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
         {
            ii = A_diag_j[jj];
            res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }
         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u_b += D_b^{-1} r : multiply the stored inverse block by res */
      for (j = 0;j < blk_size; j++)
      {
         bidx1 = i*blk_size +j;
         for (k = 0;k < blk_size; k++)
         {
            bidx = i*nb2 +j*blk_size+k;
            u_data[bidx1] += res[k]*diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return(relax_error);
}

/*Block smoother*/
/* Extract the diagonal sub-blocks of A's local diagonal part (n_block full
 * blocks of size blk_size, plus one trailing left_size x left_size block),
 * invert each with hypre_blas_mat_inv, and return them packed in *diaginvptr
 * (nb2 entries per full block, then left_size^2 entries). Any previously
 * cached buffer is freed and reallocated. Returns 1. */
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int blk_size,
                       HYPRE_Int reserved_coarse_size,
                       HYPRE_Real **diaginvptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int i, j,k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx,bidxm1,bidxp1;
   HYPRE_Int num_procs, my_id;

   const HYPRE_Int nb2 = blk_size*blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size,inv_size;
   HYPRE_Real *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): my_id is always in [0, num_procs-1], so this branch is
      dead; presumably intended my_id == num_procs - 1 (reserved coarse nodes
      on the last rank). Confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size*n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;

   if (diaginv !=NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }
   else
   {
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0;i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);

      for (k = 0;k < blk_size; k++)
      {
         for (j = 0;j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];

            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i*nb2 + k*blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* trailing rows that do not form a full block.
      NOTE(review): the left-over block is left_size x left_size, yet the row
      stride used here is blk_size (i*blk_size), and the column test uses
      `jj > n_block*blk_size` where `>=` looks intended - both appear wrong
      whenever left_size != blk_size; confirm against the intended layout. */
   for (i = 0;i < left_size; i++)
   {
      bidxm1 =n_block*nb2 + i*blk_size;
      bidxp1 =n_block*nb2 + (i+1)*blk_size;
      for (j = 0;j < left_size; j++)
      {
         bidx = n_block*nb2 + i*blk_size +j;
         diaginv[bidx] = 0.0;
      }

      for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block*blk_size)
         {
            bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0;i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
      }
      /* NOTE(review): the left-over block is stored at offset n_block*nb2,
         not blk_size*nb2 - this offset looks wrong whenever
         n_block != blk_size; confirm. */
      hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size);
   }
   else
   {
      /* scalar case: plain reciprocal of each diagonal entry */
      for (i = 0;i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
            diaginv[i] = 0.0;
         else
            diaginv[i] = 1.0 / diaginv[i];
      }
   }

   *diaginvptr = diaginv;

   return 1;
}

/* One block-relaxation sweep on A u = f: rebuilds and inverts the diagonal
 * sub-blocks locally, then applies one hypre_block_jacobi sweep.
 * Vtemp is work space; Ztemp is currently unused. Returns relax_error (0). */
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
                 hypre_ParVector *f,
                 hypre_ParVector *u,
                 HYPRE_Int blk_size,
                 HYPRE_Int reserved_coarse_size,
                 hypre_ParVector *Vtemp,
                 hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int i, j,k;
   HYPRE_Int ii, jj;

   HYPRE_Int bidx,bidxm1,bidxp1;
   HYPRE_Int relax_error = 0;

   HYPRE_Int num_procs, my_id;

   const HYPRE_Int nb2 = blk_size*blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size,inv_size;
   HYPRE_Real *diaginv;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   /* NOTE(review): dead branch - my_id can never equal num_procs; presumably
      my_id == num_procs - 1 was intended. Confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n -
 blk_size*n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size*n_block;
   }

   inv_size = nb2*n_block + left_size*left_size;

   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0;i < n_block; i++)
   {
      bidxm1 = i*blk_size;
      bidxp1 = (i+1)*blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);

      for (k = 0;k < blk_size; k++)
      {
         for (j = 0;j < blk_size; j++)
         {
            bidx = i*nb2 + k*blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
         {
            jj = A_diag_j[ii];

            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i*nb2 + k*blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* trailing rows that do not form a full block.
      NOTE(review): the left-over block is left_size x left_size, yet the row
      stride used is blk_size and the column test uses `>` where `>=` looks
      intended - both appear wrong whenever left_size != blk_size; confirm. */
   for (i = 0;i < left_size; i++)
   {
      bidxm1 =n_block*nb2 + i*blk_size;
      bidxp1 =n_block*nb2 + (i+1)*blk_size;
      for (j = 0;j < left_size; j++)
      {
         bidx = n_block*nb2 + i*blk_size +j;
         diaginv[bidx] = 0.0;
      }

      for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block*blk_size)
         {
            bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*
   for (i = 0;i < n_block; i++)
   {
      for (j = 0;j < blk_size; j++)
      {
         for (k = 0;k < blk_size; k ++)
         {
            bidx = i*nb2 + j*blk_size + k;
            printf("%e\t",diaginv[bidx]);
         }
         printf("\n");
      }
      printf("\n");
   }
   */

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0;i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
      }
      /* NOTE(review): the left-over block is stored at offset n_block*nb2,
         not blk_size*nb2 - this offset looks wrong whenever
         n_block != blk_size; confirm. */
      hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size);

      /*
      for (i = 0;i < n_block; i++)
      {
         for (j = 0;j < blk_size; j++)
         {
            for (k = 0;k < blk_size; k ++)
            {
               bidx = i*nb2 + j*blk_size + k;
               printf("%e\t",diaginv[bidx]);
            }
            printf("\n");
         }
         printf("\n");
      }
      */
   }
   else
   {
      /* scalar case: plain reciprocal of each diagonal entry */
      for (i = 0;i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
            diaginv[i] = 0.0;
         else
            diaginv[i] = 1.0 / diaginv[i];
      }
   }

   /* apply one block-Jacobi sweep with the freshly inverted blocks */
   hypre_block_jacobi(A,f,u,blk_size,n_block,left_size,diaginv,Vtemp);

   /*-----------------------------------------------------------------
    * Free temperary memeory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return(relax_error);
}

/* set coarse grid solver (solve/setup callbacks plus solver object);
 * also clears the use_default_cgrid_solver flag */
HYPRE_Int hypre_MGRSetCoarseSolver( void  *mgr_vdata,
                                    HYPRE_Int  (*coarse_grid_solver_solve)(void*,void*,void*,void*),
                                    HYPRE_Int  (*coarse_grid_solver_setup)(void*,void*,void*,void*),
                                    void  *coarse_grid_solver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
   (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
   (mgr_data -> coarse_grid_solver)       = (HYPRE_Solver) coarse_grid_solver;

   (mgr_data -> use_default_cgrid_solver) = 0;

   return hypre_error_flag;
}

/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme. */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> max_num_coarse_levels) = maxcoarselevs;
   return hypre_error_flag;
}

/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> block_size) = bsize;
   return hypre_error_flag;
}

/* Set the relaxation type for the fine levels of the reduction.
 * Currently supports the following flavors of relaxation types
 * as described in the documentation:
 * relax_types 0 - 8, 13, 14, 18, 19, 98.
 * See par_relax.c and par_relax_more.c for more details.
 * */
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> relax_type) = relax_type;
   return hypre_error_flag;
}

/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_relax_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set the F-relaxation strategy: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> Frelax_method) = relax_method;
   return hypre_error_flag;
}

/* Set the type of the restriction type
 * for computing restriction operator */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> restrict_type) = restrict_type;
   return hypre_error_flag;
}

/* Set the number of Jacobi sweeps
 * for computing the restriction operator */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_restrict_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set the type of the interpolation
 * for computing interpolation operator */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> interp_type) = interpType;
   return hypre_error_flag;
}

/* Set the number of Jacobi interpolation iterations
 * for computing interpolation operator */
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_interp_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> print_level) = print_level;
   return hypre_error_flag;
}

/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> logging) = logging;
   return hypre_error_flag;
}

/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> max_iter) = max_iter;
   return hypre_error_flag;
}

/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> tol) = tol;
   return hypre_error_flag;
}

/* Set max number of global smoothing iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> global_smooth_iters) = max_iter;
   return hypre_error_flag;
}

/* Set the global smoother type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> global_smooth_type) = iter_type;
   return hypre_error_flag;
}

/* Get number of iterations for MGR solver */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *num_iterations = mgr_data->num_iterations;

   return hypre_error_flag;
}

/* Get residual norms for MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRBuildAff( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; hypre_printf("MGR Setup parameters: \n"); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Number of coarse indexes: %d\n", (mgr_data -> num_coarse_indexes)); hypre_printf("reserved coarse nodes size: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("F-relaxation Method: %d\n", (mgr_data -> Frelax_method)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Interpolation type: %d\n", (mgr_data -> interp_type)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Restriction type: %d\n", (mgr_data -> restrict_type)); hypre_printf("Number of restriction sweeps: %d\n", 
(mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); return hypre_error_flag; }
pre_utilities.h
#ifndef PRE_UTILITES_H #define PRE_UTILITES_H /* System includes */ #include <limits> #include <iostream> #include <iomanip> #include <fstream> #include <vector> #include <stdlib.h> #include <time.h> #include <string> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "utilities/timer.h" #include "includes/variables.h" #include "utilities/openmp_utils.h" #include "cluster_information.h" #include "custom_elements/spheric_continuum_particle.h" namespace Kratos { class PreUtilities { public: typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::NodesContainerType::ContainerType NodesContainerType; typedef GlobalPointersVector<Element> ParticleWeakVectorType; typedef GlobalPointersVector<Element>::iterator ParticleWeakIteratorType; KRATOS_CLASS_POINTER_DEFINITION(PreUtilities); /// Default constructor PreUtilities() {} PreUtilities(ModelPart& rModelPart) { //mInitialCenterOfMassAndMass = CalculateCenterOfMass(rModelPart); //mInitialMass = CalculateTotalMass(rModelPart); } /// Destructor virtual ~PreUtilities() {} void SetClusterInformationInProperties(std::string const& name, pybind11::list& list_of_coordinates, pybind11::list& list_of_radii, double size, double volume, pybind11::list& inertias, Properties::Pointer& p_properties) { ClusterInformation cl_info; cl_info.mName = name; array_1d<double,3> coords(3,0.0); for (int i = 0; i < (int)pybind11::len(list_of_coordinates); i++) { pybind11::list list(list_of_coordinates[i]); coords[0] = pybind11::cast<double>(list[0]); coords[1] = pybind11::cast<double>(list[1]); coords[2] = pybind11::cast<double>(list[2]); cl_info.mListOfCoordinates.push_back(coords); } for (int i = 0; i < (int)pybind11::len(list_of_radii); i++) { cl_info.mListOfRadii.push_back(pybind11::cast<double>(list_of_radii[i])); } //TODO: check the sizes (should be the same) cl_info.mSize = size; cl_info.mVolume = volume; cl_info.mInertias[0] = 
pybind11::cast<double>(inertias[0]); cl_info.mInertias[1] = pybind11::cast<double>(inertias[1]); cl_info.mInertias[2] = pybind11::cast<double>(inertias[2]); p_properties->SetValue(CLUSTER_INFORMATION, cl_info); } void PrintNumberOfNeighboursHistogram(const ModelPart& rSpheresModelPart, std::string const& filename) { std::vector<int> number_of_spheres_with_i_neighbours; number_of_spheres_with_i_neighbours.resize(20); for(int i=0; i<(int)number_of_spheres_with_i_neighbours.size(); i++) {number_of_spheres_with_i_neighbours[i] = 0;} const ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements(); ElementsArrayType::ptr_const_iterator begin = pElements.ptr_begin(); for(int i=0; i<(int)pElements.size(); i++) { ElementsArrayType::ptr_const_iterator it = begin + i; const Element& el = **it; const SphericContinuumParticle* p_cont_sphere = dynamic_cast<const SphericContinuumParticle*>(&el); if(p_cont_sphere) { unsigned int size = p_cont_sphere->mContinuumInitialNeighborsSize; if(size > number_of_spheres_with_i_neighbours.size() - 1) size = number_of_spheres_with_i_neighbours.size() - 1; number_of_spheres_with_i_neighbours[size] += 1; } else { const SphericParticle* p_sphere = dynamic_cast<const SphericParticle*>(&el); unsigned int size = p_sphere->mNeighbourElements.size(); if(size > number_of_spheres_with_i_neighbours.size() - 1) size = number_of_spheres_with_i_neighbours.size() - 1; number_of_spheres_with_i_neighbours[size] += 1; } } std::ofstream outputfile(filename, std::ios_base::out | std::ios_base::app); outputfile << "number_of_neighbours percentage_of_spheres_with_that_number_of_neighbours number_of_spheres_with_that_number_of_neighbours\n"; for(int i=0; i<(int)number_of_spheres_with_i_neighbours.size(); i++) { const double percentage = (double)(number_of_spheres_with_i_neighbours[i]) / (double)(rSpheresModelPart.NumberOfElements(0)) * 100.0; outputfile <<i<<" "<<percentage<<" "<<number_of_spheres_with_i_neighbours[i]<<"\n"; } } void 
FillAnalyticSubModelPartUtility(ModelPart& rSpheresModelPart, ModelPart& rAnalyticSpheresModelPart){ ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements(); std::vector<std::vector<std::size_t> > thread_vectors_of_ids; int mNumberOfThreads = ParallelUtilities::GetNumThreads(); thread_vectors_of_ids.resize(mNumberOfThreads); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++) { ElementsArrayType::iterator it = pElements.ptr_begin() + k; int analytic_particle_id = it->Id(); thread_vectors_of_ids[OpenMPUtils::ThisThread()].push_back(analytic_particle_id); } std::vector<std::size_t> vector_of_ids; for (int i = 0; i < mNumberOfThreads; i++) { vector_of_ids.insert(vector_of_ids.end(), thread_vectors_of_ids[i].begin(), thread_vectors_of_ids[i].end()); } rAnalyticSpheresModelPart.AddElements(vector_of_ids); } // non-OMP version // void FillAnalyticSubModelPartUtility(ModelPart& rSpheresModelPart, ModelPart& rAnalyticSpheresModelPart){ // ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements(); // std::vector<long unsigned int> vector_of_ids; // for (int k = 0; k < (int)pElements.size(); k++) { // ElementsArrayType::iterator it = pElements.ptr_begin() + k; // int analytic_particle_id = it->Id(); // vector_of_ids.push_back(analytic_particle_id); // } // rAnalyticSpheresModelPart.AddElements(vector_of_ids); // } void ResetSkinParticles(ModelPart& r_model_part) { auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes(); #pragma omp parallel for for (int k = 0; k < (int)pNodes.size(); k++) { auto it = pNodes.begin() + k; it->FastGetSolutionStepValue(SKIN_SPHERE) = 0.0; } } void SetSkinParticlesInnerCircularBoundary(ModelPart& r_model_part, const double inner_radius, const double detection_radius) { auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes(); #pragma omp parallel for for (int k = 0; k < (int)pNodes.size(); k++) { auto it = pNodes.begin() + k; const 
array_1d<double, 3>& coords = it->Coordinates(); array_1d<double, 3> vector_distance_to_center; noalias(vector_distance_to_center) = coords; const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center); if(distance_to_center < inner_radius + detection_radius) { it->FastGetSolutionStepValue(SKIN_SPHERE) = 1.0; } } } void SetSkinParticlesOuterCircularBoundary(ModelPart& r_model_part, const double outer_radius, const double detection_radius) { auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes(); #pragma omp parallel for for (int k = 0; k < (int)pNodes.size(); k++) { auto it = pNodes.begin() + k; const array_1d<double, 3>& coords = it->Coordinates(); array_1d<double, 3> vector_distance_to_center; noalias(vector_distance_to_center) = coords; const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center); const double radius = it->FastGetSolutionStepValue(RADIUS); if (distance_to_center + radius > outer_radius - detection_radius) { it->FastGetSolutionStepValue(SKIN_SPHERE) = 1.0; } } } void SetSkinParticlesOuterSquaredBoundary(ModelPart& r_model_part, const double outer_radius, const array_1d<double, 3>& center, const double detection_radius) { auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes(); #pragma omp parallel for for (int k = 0; k < (int)pNodes.size(); k++) { auto it = pNodes.begin() + k; const array_1d<double, 3>& coords = it->Coordinates(); array_1d<double, 3> vector_distance_to_center; noalias(vector_distance_to_center) = coords - center; const double total_x_distance = fabs(vector_distance_to_center[0]); const double total_y_distance = fabs(vector_distance_to_center[1]); const double radius = it->FastGetSolutionStepValue(RADIUS); if ((total_x_distance + radius > outer_radius - detection_radius) || (total_y_distance + radius > outer_radius - detection_radius)) { it->FastGetSolutionStepValue(SKIN_SPHERE) = 1.0; } } } void BreakBondUtility(ModelPart& rSpheresModelPart) { 
ElementsArrayType& pElements = rSpheresModelPart.GetCommunicator().LocalMesh().Elements(); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++) { ElementsArrayType::iterator it = pElements.ptr_begin() + k; Element* p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(p_element); if (p_sphere->mNeighbourElements[k] == NULL) continue; double x_node = p_sphere->GetGeometry()[0].Coordinates()[0]; double y_node = p_sphere->GetGeometry()[0].Coordinates()[1]; double z_node = p_sphere->GetGeometry()[0].Coordinates()[2]; double radius = 0.0225; // radi if ((x_node*x_node + z_node*z_node >= radius*radius && y_node < 0.01) || (x_node*x_node + z_node*z_node >= radius*radius && y_node > 0.07)) { // 1- geometry condition unsigned int number_of_neighbors = p_sphere->mContinuumInitialNeighborsSize; for (unsigned int i = 0; i < number_of_neighbors; i++) { SphericContinuumParticle* neighbour_iterator = dynamic_cast<SphericContinuumParticle*>(p_sphere->mNeighbourElements[i]); double x_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[0]; double z_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[2]; double radius_it = 0.0225; // radi de la entalla en el shear test. 
if (x_node_it*x_node_it + z_node_it*z_node_it < radius_it*radius_it) { // 2- geometry condition //int& failure_type = p_sphere->mIniNeighbourFailureId[i]; //failure_type = 1; p_sphere->Set(TO_ERASE, true); neighbour_iterator->Set(TO_ERASE, true); //noalias(other_to_me_vector) = p_sphere->GetGeometry()[0].Coordinates() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].Coordinates(); //noalias(initial_other_to_me_vector) = p_sphere->GetGeometry()[0].GetInitialPosition() - p_sphere->mNeighbourElements[i]->GetGeometry()[0].GetInitialPosition(); } } } else if ((x_node*x_node + z_node*z_node < radius*radius && y_node < 0.01) || (x_node*x_node + z_node*z_node < radius*radius && y_node > 0.07)) { unsigned int number_of_neighbors = p_sphere->mContinuumInitialNeighborsSize; for (unsigned int i = 0; i < number_of_neighbors; i++) { SphericContinuumParticle* neighbour_iterator = dynamic_cast<SphericContinuumParticle*>(p_sphere->mNeighbourElements[i]); double x_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[0]; double z_node_it = neighbour_iterator->GetGeometry()[0].Coordinates()[2]; double radius_it = 0.0225; // radi de la entalla en el shear test. if (x_node_it*x_node_it + z_node_it*z_node_it > radius_it*radius_it) { // 2- geometry condition //int& failure_type = p_sphere->mIniNeighbourFailureId[i]; //failure_type = 1; p_sphere->Set(TO_ERASE, true); neighbour_iterator->Set(TO_ERASE, true); } } } } } void CreateCartesianSpecimenMdpa(std::string filename) { // We have a prismatic specimen of dimensions 1m x 1m x 2m const double side = 0.15; int divisions; KRATOS_WARNING("DEM") << "\nEnter the number of divisions: "; std::cin >> divisions; if (!divisions) { KRATOS_WARNING("DEM") << "\nCannot divide by zero. 
Program stopped.\n\n"; exit(EXIT_FAILURE); } const double radius = 0.5 * side / divisions; int node_counter = 0; std::vector<int> skin_nodes; std::vector<int> top_nodes; std::vector<int> bottom_nodes; filename += "DEM.mdpa"; // std::ifstream infile(filename); if(infile.good()) { while(1){ KRATOS_WARNING("DEM") << "\nThe file already exists. Do you want to overwrite it? (y/n) "; char yn; std::cin >> yn; if(yn == 'n') { KRATOS_WARNING("DEM") << "\nStopped.\n\n"; exit(EXIT_FAILURE); } if(yn=='y') break; } } KRATOS_INFO("DEM") << "\nGenerating mesh...\n\n"; clock_t initial_time, end_time; initial_time = clock(); std::ofstream outputfile(filename, std::ios_base::out); outputfile << "Begin ModelPartData\nEnd ModelPartData\n\n"; outputfile << "Begin Properties 1\n"; outputfile << "PARTICLE_DENSITY 2550.0\n"; outputfile << "YOUNG_MODULUS 35e9\n"; outputfile << "POISSON_RATIO 0.20\n"; outputfile << "STATIC_FRICTION 0.5773502691896257\n"; outputfile << "DYNAMIC_FRICTION 0.5773502691896257\n"; outputfile << "FRICTION_DECAY 500.0\n"; outputfile << "PARTICLE_COHESION 0.0\n"; outputfile << "COEFFICIENT_OF_RESTITUTION 0.2\n"; outputfile << "PARTICLE_MATERIAL 1\n"; outputfile << "ROLLING_FRICTION 0.01\n"; outputfile << "ROLLING_FRICTION_WITH_WALLS 0.01\n"; outputfile << "DEM_CONTINUUM_CONSTITUTIVE_LAW_NAME DEM_Dempack\n"; outputfile << "DEM_DISCONTINUUM_CONSTITUTIVE_LAW_NAME DEM_D_Linear_viscous_Coulomb\n"; outputfile << "SLOPE_LIMIT_COEFF_C1 24\n"; outputfile << "SLOPE_LIMIT_COEFF_C2 28\n"; outputfile << "SLOPE_LIMIT_COEFF_C3 1\n"; outputfile << "SLOPE_FRACTION_N1 1\n"; outputfile << "SLOPE_FRACTION_N2 1\n"; outputfile << "SLOPE_FRACTION_N3 35e9\n"; outputfile << "YOUNG_MODULUS_PLASTIC 1000\n"; outputfile << "PLASTIC_YIELD_STRESS 0.2\n"; outputfile << "DAMAGE_FACTOR 1\n"; outputfile << "SHEAR_ENERGY_COEF 1\n"; outputfile << "CONTACT_TAU_ZERO 5\n"; outputfile << "CONTACT_SIGMA_MIN 1\n"; outputfile << "CONTACT_INTERNAL_FRICC 20\n"; outputfile << "End Properties\n"; outputfile << 
"\nBegin Nodes\n"; // Relative sizes according to axes: int ai=1; int aj=2; int ak=1; //Generation of the samble for (int k = 0; k < ai*divisions; k++) { for (int j = 0; j < aj* divisions; j++) { for (int i = 0; i < ak*divisions; i++) { outputfile << ++node_counter << " " << (1 + 2 * i) * radius - 0.5*side << " " << (1 + 2 * j) * radius << " " << (1 + 2 * k) * radius - 0.5*side << '\n'; if ((i == 0) || (j == 0) || (k == 0) || (i == ai* divisions - 1) || (j == aj*divisions - 1) || (k == ak*divisions - 1)) skin_nodes.push_back(node_counter); if (k == 0) bottom_nodes.push_back(node_counter); if (k == 2 * divisions - 1) top_nodes.push_back(node_counter); } } } // outputfile << "End Nodes\n"; outputfile << "\nBegin Elements SphericContinuumParticle3D\n"; for (int i = 1; i <= node_counter; i++) outputfile << i << " 1 " << i << '\n'; outputfile << "End Elements\n"; outputfile << "\nBegin NodalData RADIUS\n"; for (int i = 1; i <= node_counter; i++) outputfile << i << " 0 " << radius << '\n'; outputfile << "End NodalData\n"; outputfile << "\nBegin NodalData COHESIVE_GROUP // whole specimen\n"; for (int i = 1; i <= node_counter; i++) outputfile << i << " 0 1\n"; outputfile << "End NodalData\n"; //outputfile << "\nBegin NodalData COHESIVE_GROUP // bottom nodes\n"; //for (std::vector<int>::iterator it_bottom = bottom_nodes.begin(); it_bottom != bottom_nodes.end(); it_bottom++) outputfile << *it_bottom << " 0 1\n"; //outputfile << "End NodalData\n\nBegin NodalData COHESIVE_GROUP // top nodes\n"; //for (std::vector<int>::iterator it_top = top_nodes.begin(); it_top != top_nodes.end(); it_top++) outputfile << *it_top << " 0 1\n"; //outputfile << "End NodalData\n"; outputfile << "\nBegin NodalData SKIN_SPHERE\n"; for (std::vector<int>::iterator it_skin = skin_nodes.begin(); it_skin != skin_nodes.end(); it_skin++) outputfile << *it_skin << " 0 1\n"; outputfile << "End NodalData\n\n"; /*outputfile << "Begin Mesh 1 // bottom nodes\n Begin MeshData\n VELOCITY_START_TIME 0.0\n"; 
outputfile << " FORCE_INTEGRATION_GROUP 0\n VELOCITY_STOP_TIME 100.0\n TOP 0\n"; outputfile << " IMPOSED_VELOCITY_Z_VALUE 0.0005\n BOTTOM 0\n End MeshData\n Begin MeshNodes\n"; for (std::vector<int>::iterator it_bottom = bottom_nodes.begin(); it_bottom != bottom_nodes.end(); it_bottom++) outputfile << " " << *it_bottom << '\n'; outputfile << " End MeshNodes\nEnd Mesh\n\n"; outputfile << "Begin Mesh 2 // top nodes\n Begin MeshData\n VELOCITY_START_TIME 0.0\n"; outputfile << " FORCE_INTEGRATION_GROUP 0\n VELOCITY_STOP_TIME 100.0\n TOP 0\n"; outputfile << " IMPOSED_VELOCITY_Z_VALUE -0.0005\n BOTTOM 0\n End MeshData\n Begin MeshNodes\n"; for (std::vector<int>::iterator it_top = top_nodes.begin(); it_top != top_nodes.end(); it_top++) outputfile << " " << *it_top << '\n'; outputfile << " End MeshNodes\nEnd Mesh\n";*/ outputfile.close(); end_time = clock(); double elapsed_time = (double(end_time) - double(initial_time)) / CLOCKS_PER_SEC; KRATOS_INFO("DEM") << "\nfinished!\n\n"; KRATOS_INFO("DEM") << "\nTotal number of elements: " << node_counter << '\n'; KRATOS_INFO("DEM") << "\nTime required to create the mdpa file: " << elapsed_time << " seconds\n\n"; } void MeasureTopHeight(ModelPart& rModelPart, double& subtotal, double& weight) { /* ElementsArrayType& pElements = rModelPart.Elements(); for (ElementsArrayType::iterator it= pElements.begin(); it!=pElements.end(); ++it) { if( it->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) == 1 ) { ParticleWeakVectorType& mrNeighbours = it->GetValue(NEIGHBOUR_ELEMENTS); for(ParticleWeakIteratorType ineighbour = mrNeighbours.begin(); ineighbour != mrNeighbours.end(); ineighbour++) { if( ineighbour->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) != 1 ) { subtotal += it->GetGeometry()[0].Coordinates()[1]*it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS); weight += it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS); break; } } } } */ } void MeasureBotHeight(ModelPart& rModelPart, double& subtotal, double& weight) { /* 
ElementsArrayType& pElements = rModelPart.Elements(); for (ElementsArrayType::iterator it= pElements.begin(); it!=pElements.end(); ++it) { if( it->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) == 2 ) { ParticleWeakVectorType& mrNeighbours = it->GetValue(NEIGHBOUR_ELEMENTS); for(ParticleWeakIteratorType ineighbour = mrNeighbours.begin(); ineighbour != mrNeighbours.end(); ineighbour++) { if( ineighbour->GetGeometry()[0].FastGetSolutionStepValue(GROUP_ID) != 2 ) { subtotal += it->GetGeometry()[0].Coordinates()[1]*it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS); weight += it->GetGeometry()[0].FastGetSolutionStepValue(RADIUS); break; } } } } */ } void MarkToEraseParticlesOutsideRadius(ModelPart& r_model_part, const double max_radius, const array_1d<double, 3>& center, const double tolerance_for_erasing) { auto& pNodes = r_model_part.GetCommunicator().LocalMesh().Nodes(); #pragma omp parallel for for (int k = 0; k < (int)pNodes.size(); k++) { auto it = pNodes.begin() + k; const array_1d<double, 3>& coords = it->Coordinates(); array_1d<double, 3> vector_distance_to_center; noalias(vector_distance_to_center) = coords - center; const double distance_to_center = MathUtils<double>::Norm3(vector_distance_to_center); const double radius = it->FastGetSolutionStepValue(RADIUS); if(distance_to_center + radius > max_radius + tolerance_for_erasing) { it->Set(TO_ERASE, true); } } } void ApplyConcentricForceOnParticles(ModelPart& r_model_part, const array_1d<double, 3>& center, const double density_for_artificial_gravity) { auto& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++) { auto it = pElements.begin() + k; auto& node = it->GetGeometry()[0]; const array_1d<double, 3>& coords = node.Coordinates(); array_1d<double, 3> vector_particle_to_center; noalias(vector_particle_to_center) = center - coords; const double distance_to_center = 
MathUtils<double>::Norm3(vector_particle_to_center); const double inv_dist = 1.0 / distance_to_center; array_1d<double, 3> force; SphericParticle* spheric_p_particle = dynamic_cast<SphericParticle*> (&*it); const double volume = spheric_p_particle->CalculateVolume(); noalias(force) = inv_dist * vector_particle_to_center * volume * density_for_artificial_gravity; node.FastGetSolutionStepValue(EXTERNAL_APPLIED_FORCE) = force; } } array_1d<double, 3> GetInitialCenterOfMass() { return mInitialCenterOfMassAndMass; } /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const { } std::vector<unsigned int>& GetElementPartition() {return (mElementPartition);}; protected: std::vector<unsigned int> mElementPartition; private: array_1d<double, 3> mInitialCenterOfMassAndMass; double mInitialMass; /// Assignment operator PreUtilities & operator=(PreUtilities const& rOther); }; // Class PreUtilities /// output stream function // template<std::size_t TDim> // inline std::ostream& operator << (std::ostream& rOStream) // { // rThis.PrintInfo(rOStream); // rOStream << std::endl; // rThis.PrintData(rOStream); // // return rOStream; // } } // namespace Kratos #endif // PRE_UTILITES_H
beta_projectors.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file beta_projectors.h * * \brief Contains declaration and implementation of sirius::Beta_projectors class. 
*/ #ifndef __BETA_PROJECTORS_H__ #define __BETA_PROJECTORS_H__ #include "gpu.h" #include "communicator.hpp" #include "unit_cell.h" #include "wave_functions.hpp" #include "sbessel.h" #include "simulation_context.h" #ifdef __GPU extern "C" void create_beta_gk_gpu(int num_atoms, int num_gkvec, int const* beta_desc, cuDoubleComplex const* beta_gk_t, double const* gkvec, double const* atom_pos, cuDoubleComplex* beta_gk); #endif namespace sirius { enum beta_desc_idx { nbf = 0, offset = 1, offset_t = 2, ia = 3 }; class Beta_projectors_gradient; /// Stores <G+k | beta> expansion /** \todo Beta_projectors and Beta_projectors_gradient need some rethinking. Beta_projectors are used in two * places: in application of non-local potential and in generation of density matrix. Beta_projectors_gradient * are used in the calculation of forces. Both are split in chunks, both require an inner product with * wave-functions. */ class Beta_projectors { friend class Beta_projectors_gradient; protected: Communicator const& comm_; Unit_cell const& unit_cell_; Gvec const& gkvec_; mdarray<double, 2> gkvec_coord_; int lmax_beta_; device_t pu_; int num_gkvec_loc_; /// Total number of beta-projectors among atom types. int num_beta_t_; /// Phase-factor independent plane-wave coefficients of |beta> functions for atom types. matrix<double_complex> beta_gk_t_; /// Plane-wave coefficients of |beta> functions for all atoms. matrix<double_complex> beta_gk_a_; /// Plane-wave coefficients of |beta> functions for a chunk of atoms. matrix<double_complex> beta_gk_; /// Inner product between beta-projectors and wave-functions. /** Store as double to handle both gamma- and general k-point cases */ mdarray<double, 1> beta_phi_; /// Explicit GPU buffer for beta-projectors. 
/// GPU-resident work buffer for the beta projectors of a single chunk (sized num_gkvec_loc_ x max_num_beta_).
matrix<double_complex> beta_gk_gpu_;

/// Descriptor of a group ("chunk") of atoms whose beta projectors are processed together.
struct beta_chunk_t
{
    int num_beta_;               // total number of beta projectors in this chunk
    int num_atoms_;              // number of atoms in this chunk
    int offset_;                 // column offset of this chunk inside beta_gk_a_
    mdarray<int, 2> desc_;       // per-atom descriptor: (nbf, offset, offset_t, ia) x num_atoms_
    mdarray<double, 2> atom_pos_; // fractional atom coordinates, 3 x num_atoms_
};

mdarray<beta_chunk_t, 1> beta_chunks_;

/// Largest num_beta_ over all chunks; sizes the per-chunk GPU buffer.
int max_num_beta_;

/// Generate plane-wave coefficients for beta-projectors of atom types.
void generate_beta_gk_t(Simulation_context const& ctx__);

/// Partition the atoms into chunks and fill beta_chunks_ / max_num_beta_ / num_beta_t_.
void split_in_chunks();

/// calculates < Beta | Psi > inner product
template <typename T>
void inner(int chunk__, wave_functions& phi__, int idx0__, int n__, mdarray<double_complex, 2>& beta_gk__, mdarray<double, 1>& beta_phi__);

public:

Beta_projectors(Simulation_context const& ctx__, Communicator const& comm__, Gvec const& gkvec__);

matrix<double_complex>& beta_gk_t()
{
    return beta_gk_t_;
}

matrix<double_complex> const& beta_gk_a()
{
    return beta_gk_a_;
}

matrix<double_complex> const& beta_gk() const
{
    return beta_gk_;
}

/// View of the <beta|phi> buffer for one chunk, reinterpreted as nbeta x n matrix of T.
/// NOTE(review): std::move() on a prvalue return is a pessimization (inhibits RVO); plain return would do.
template <typename T>
matrix<T> beta_phi(int chunk__, int n__)
{
    int nbeta = beta_chunk(chunk__).num_beta_;
    if (pu_ == GPU) {
        // wrap both the host and the device pointer of the shared buffer
        return std::move(matrix<T>(reinterpret_cast<T*>(beta_phi_.at<CPU>()),
                                   reinterpret_cast<T*>(beta_phi_.at<GPU>()),
                                   nbeta, n__));
    } else {
        return std::move(matrix<T>(reinterpret_cast<T*>(beta_phi_.at<CPU>()), nbeta, n__));
    }
}

Unit_cell const& unit_cell() const
{
    return unit_cell_;
}

Communicator const& comm() const
{
    return comm_;
}

Gvec const& gk_vectors() const
{
    return gkvec_;
}

device_t proc_unit() const
{
    return pu_;
}

int lmax_beta() const
{
    return lmax_beta_;
}

inline int num_beta_chunks() const
{
    return static_cast<int>(beta_chunks_.size());
}

inline beta_chunk_t const& beta_chunk(int idx__) const
{
    return beta_chunks_(idx__);
}

inline int num_gkvec_loc() const
{
    return num_gkvec_loc_;
}

void generate(int chunk__);

/// Convenience overload: inner product into the member buffers beta_gk_ / beta_phi_.
template <typename T>
void inner(int chunk__, wave_functions& phi__, int idx0__, int n__)
{
    inner<T>(chunk__, phi__, idx0__, n__, beta_gk_, beta_phi_);
}

int max_num_beta()
{
    return max_num_beta_;
}

/// Allocate device buffers before a sequence of generate()/inner() calls.
/// NOTE(review): unlike dismiss(), this is not guarded by #ifdef __GPU — confirm GPU code paths compile host-only.
void prepare()
{
    if (pu_ == GPU) {
        beta_gk_gpu_.allocate(memory_t::device);
        beta_phi_.allocate(memory_t::device);
    }
}

/// Free the device buffers allocated by prepare().
void dismiss()
{
#ifdef __GPU
    if (pu_ == GPU) {
        beta_gk_gpu_.deallocate_on_device();
        beta_phi_.deallocate_on_device();
    }
#endif
}

/// Derivative of the beta-projector plane-wave coefficients with respect to lattice vectors.
/// NOTE(review): mu and nu are hard-coded to 0, so only the (0,0) strain component is computed,
/// and the result beta_gk_t_lat_deriv is a local that is discarded — this looks like work in progress.
void generate_beta_gk_t_lat_deriv(Simulation_context const& ctx__)
{
    PROFILE("sirius::Beta_projectors::generate_beta_gk_t_lat_deriv");

    int mu = 0;
    int nu = 0;

    /* allocate array */
    matrix<double_complex> beta_gk_t_lat_deriv(gkvec_.gvec_count(comm_.rank()), num_beta_t_);

    /* compute dG_tau / da_{mu,nu} */
    auto dG_da = [this](vector3d<double>& gvc, int tau, int mu, int nu)
    {
        return -unit_cell_.inverse_lattice_vectors()(nu, tau) * gvc[mu];
    };

    /* compute derivative of theta angle with respect to lattice vectors d theta / da_{mu,nu} */
    auto dtheta_da = [this, dG_da](vector3d<double>& gvc, vector3d<double>& gvs, int mu, int nu)
    {
        double g     = gvs[0];
        double theta = gvs[1];
        double phi   = gvs[2];

        double result = std::cos(theta) * std::cos(phi) * dG_da(gvc, 0, mu, nu) +
                        std::cos(theta) * std::sin(phi) * dG_da(gvc, 1, mu, nu) -
                        std::sin(theta) * dG_da(gvc, 2, mu, nu);
        return result / g;
    };

    /* d phi / da_{mu,nu} (times sin(theta), judging by dRlm_dphi_sin_theta below).
       NOTE(review): both terms use dG_da(gvc, 0, ...); by analogy with dtheta_da the second
       term should very likely be dG_da(gvc, 1, mu, nu) — please confirm against the derivation. */
    auto dphi_da = [this, dG_da](vector3d<double>& gvc, vector3d<double>& gvs, int mu, int nu)
    {
        double g   = gvs[0];
        double phi = gvs[2];

        double result = -std::sin(phi) * dG_da(gvc, 0, mu, nu) + std::cos(phi) * dG_da(gvc, 0, mu, nu);
        return result / g;
    };

    /* chain rule: d Rlm / da = dRlm/dtheta * dtheta/da + dRlm/dphi * dphi/da */
    auto dRlm_da = [this, dtheta_da, dphi_da](int lm, vector3d<double>& gvc, vector3d<double>& gvs, int mu, int nu)
    {
        double theta = gvs[1];
        double phi   = gvs[2];

        return SHT::dRlm_dtheta(lm, theta, phi) * dtheta_da(gvc, gvs, mu, nu) +
               SHT::dRlm_dphi_sin_theta(lm, theta, phi) * dphi_da(gvc, gvs, mu, nu);
    };

    /* d |G+k| / da_{mu,nu}: projection of dG/da onto the unit vector along G+k */
    auto djl_da = [this, dG_da](vector3d<double>& gvc, vector3d<double>& gvs, int mu, int nu)
    {
        double theta = gvs[1];
        double phi   = gvs[2];

        return std::sin(theta) * std::cos(phi) * dG_da(gvc, 0, mu, nu) +
               std::sin(theta) * std::sin(phi) * dG_da(gvc, 1, mu, nu) +
               std::cos(theta) * dG_da(gvc, 2, mu, nu);
    };

    /* compute d <G+k|beta> / a_{mu, nu} */
    #pragma omp parallel for
    for (int igkloc = 0; igkloc < gkvec_.gvec_count(comm_.rank()); igkloc++) {
        int igk = gkvec_.gvec_offset(comm_.rank()) + igkloc;
        auto gvc = gkvec_.gkvec_cart(igk);
        /* vs = {r, theta, phi} */
        auto gvs = SHT::spherical_coordinates(gvc);

        /* the derivative is singular at |G+k| = 0; set those coefficients to zero */
        if (gvs[0] < 1e-10) {
            for (int i = 0; i < num_beta_t_; i++) {
                beta_gk_t_lat_deriv(igkloc, i) = 0;
            }
            continue;
        }

        /* compute real spherical harmonics for G+k vector */
        std::vector<double> gkvec_rlm(Utils::lmmax(lmax_beta_));
        std::vector<double> gkvec_drlm(Utils::lmmax(lmax_beta_));

        SHT::spherical_harmonics(lmax_beta_, gvs[1], gvs[2], &gkvec_rlm[0]);
        for (int lm = 0; lm < Utils::lmmax(lmax_beta_); lm++) {
            gkvec_drlm[lm] = dRlm_da(lm, gvc, gvs, mu, nu);
        }

        for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
            auto& atom_type = unit_cell_.atom_type(iat);
            for (int xi = 0; xi < atom_type.mt_basis_size(); xi++) {
                int l     = atom_type.indexb(xi).l;
                int lm    = atom_type.indexb(xi).lm;
                int idxrf = atom_type.indexb(xi).idxrf;

                /* (-i)^l * 4pi / sqrt(omega) prefactor, as in generate_beta_gk_t() */
                auto z = std::pow(double_complex(0, -1), l) * fourpi / std::sqrt(unit_cell_.omega());

                /* term from the derivative of Rlm and of the 1/sqrt(omega) normalization */
                auto d1 = ctx__.radial_integrals().beta_radial_integral(idxrf, iat, gvs[0]) *
                          (gkvec_drlm[lm] - 0.5 * gkvec_rlm[lm] * unit_cell_.inverse_lattice_vectors()(nu, mu));

                /* term from the derivative of the radial integral w.r.t. |G+k| */
                auto d2 = gkvec_rlm[lm] * djl_da(gvc, gvs, mu, nu) *
                          ctx__.radial_integrals().beta_djldq_radial_integral(idxrf, iat, gvs[0]);

                beta_gk_t_lat_deriv(igkloc, atom_type.offset_lo() + xi) = z * (d1 + d2);
            }
        }
    }

    //if (unit_cell_.parameters().control().print_checksum_) {
    //    auto c1 = beta_gk_t_.checksum();
    //    comm_.allreduce(&c1, 1);
    //    if (comm_.rank() == 0) {
    //        DUMP("checksum(beta_gk_t) : %18.10f %18.10f", c1.real(), c1.imag())
    //    }
    //}
}
};

/// Constructor: splits atoms into chunks, generates the atom-type projectors beta_gk_t_,
/// and builds the per-atom projectors beta_gk_a_ by multiplying with the structure-factor phase.
inline Beta_projectors::Beta_projectors(Simulation_context const& ctx__, Communicator const& comm__, Gvec const& gkvec__)
    : comm_(comm__)
    , unit_cell_(ctx__.unit_cell())
    , gkvec_(gkvec__)
    , lmax_beta_(unit_cell_.lmax())
    , pu_(ctx__.processing_unit())
{
    PROFILE("sirius::Beta_projectors::Beta_projectors");

    num_gkvec_loc_ = gkvec_.gvec_count(comm_.rank());

    split_in_chunks();

    generate_beta_gk_t(ctx__);

    if (pu_ == GPU) {
        gkvec_coord_ = mdarray<double, 2>(3, num_gkvec_loc_, ctx__.dual_memory_t());
        /* copy G+k vectors */
        for (int igk_loc = 0; igk_loc < num_gkvec_loc_; igk_loc++) {
            int igk = gkvec_.gvec_offset(comm_.rank()) + igk_loc;
            auto vgk = gkvec_.gkvec(igk);
            for (auto x: {0, 1, 2}) {
                gkvec_coord_(x, igk_loc) = vgk[x];
            }
        }
        gkvec_coord_.copy<memory_t::host, memory_t::device>();

        beta_gk_t_.allocate(memory_t::device);
        beta_gk_t_.copy<memory_t::host, memory_t::device>();
    }

    /* host-side handle only; device memory is attached later in prepare() */
    beta_gk_gpu_ = matrix<double_complex>(num_gkvec_loc_, max_num_beta_, memory_t::none);

    beta_gk_a_ = matrix<double_complex>(num_gkvec_loc_, unit_cell_.mt_lo_basis_size());

    /* NOTE(review): bare "omp for" outside a parallel region is a no-op worksharing directive
       (the loop runs serially); this was presumably meant to be "omp parallel for" — confirm. */
    #pragma omp for
    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        /* e^{i k * r_a}: phase factor of the k-point at the atom position */
        double phase = twopi * (gkvec_.vk() * unit_cell_.atom(ia).position());
        double_complex phase_k = std::exp(double_complex(0.0, phase));

        /* conj(e^{i (G+k) * r_a}) for every local G vector */
        std::vector<double_complex> phase_gk(num_gkvec_loc_);
        for (int igk_loc = 0; igk_loc < num_gkvec_loc_; igk_loc++) {
            int igk = gkvec_.gvec_offset(comm_.rank()) + igk_loc;
            auto G = gkvec_.gvec(igk);
            phase_gk[igk_loc] = std::conj(ctx__.gvec_phase_factor(G, ia) * phase_k);
        }

        /* beta_a(G+k) = beta_t(G+k) * conj(phase), copied from the atom-type table */
        for (int xi = 0; xi < unit_cell_.atom(ia).mt_lo_basis_size(); xi++) {
            for (int igk_loc = 0; igk_loc < num_gkvec_loc_; igk_loc++) {
                beta_gk_a_(igk_loc, unit_cell_.atom(ia).offset_lo() + xi) =
                    beta_gk_t_(igk_loc, unit_cell_.atom(ia).type().offset_lo() + xi) * phase_gk[igk_loc];
            }
        }
    }
}

/// Compute <G+k|beta> for each atom type: (-i)^l * 4pi/sqrt(omega) * Rlm(G+k) * radial integral.
inline void Beta_projectors::generate_beta_gk_t(Simulation_context const& ctx__)
{
    PROFILE("sirius::Beta_projectors::generate_beta_gk_t");

    if (!num_beta_t_) {
        return;
    }

    /* allocate array */
    beta_gk_t_ = matrix<double_complex>(gkvec_.gvec_count(comm_.rank()), num_beta_t_);

    /* compute <G+k|beta> */
    #pragma omp parallel for
    for (int igkloc = 0; igkloc < gkvec_.gvec_count(comm_.rank()); igkloc++) {
        int igk = gkvec_.gvec_offset(comm_.rank()) + igkloc;
        double gk = gkvec_.gvec_len(igk);
        /* vs = {r, theta, phi} */
        auto vs = SHT::spherical_coordinates(gkvec_.gkvec_cart(igk));
        /* compute real spherical harmonics for G+k vector */
        std::vector<double> gkvec_rlm(Utils::lmmax(lmax_beta_));
        SHT::spherical_harmonics(lmax_beta_, vs[1], vs[2], &gkvec_rlm[0]);
        for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
            auto& atom_type = unit_cell_.atom_type(iat);
            for (int xi = 0; xi < atom_type.mt_basis_size(); xi++) {
                int l     = atom_type.indexb(xi).l;
                int lm    = atom_type.indexb(xi).lm;
                int idxrf = atom_type.indexb(xi).idxrf;

                auto z = std::pow(double_complex(0, -1), l) * fourpi / std::sqrt(unit_cell_.omega());
                beta_gk_t_(igkloc, atom_type.offset_lo() + xi) =
                    z * gkvec_rlm[lm] * ctx__.radial_integrals().beta_radial_integral(idxrf, iat, gk);
            }
        }
    }

    if (unit_cell_.parameters().control().print_checksum_) {
        auto c1 = beta_gk_t_.checksum();
        comm_.allreduce(&c1, 1);
        if (comm_.rank() == 0) {
            DUMP("checksum(beta_gk_t) : %18.10f %18.10f", c1.real(), c1.imag())
        }
    }
}

/// Group atoms into chunks of at most 256 atoms (only when running with more than one MPI rank)
/// and record, per atom, the number of beta functions and the offsets into beta_gk_/beta_gk_t_.
inline void Beta_projectors::split_in_chunks()
{
    /* split beta-projectors into chunks */
    int num_atoms_in_chunk = (comm_.size() == 1) ? unit_cell_.num_atoms() : std::min(unit_cell_.num_atoms(), 256);
    /* ceil(num_atoms / num_atoms_in_chunk) */
    int num_beta_chunks = unit_cell_.num_atoms() / num_atoms_in_chunk + std::min(1, unit_cell_.num_atoms() % num_atoms_in_chunk);
    splindex<block> spl_beta_chunks(unit_cell_.num_atoms(), num_beta_chunks, 0);

    beta_chunks_ = mdarray<beta_chunk_t, 1>(num_beta_chunks);

    int offset_in_beta_gk = 0;
    for (int ib = 0; ib < num_beta_chunks; ib++) {
        /* number of atoms in chunk */
        int na = spl_beta_chunks.local_size(ib);
        beta_chunks_(ib).num_atoms_ = na;
        beta_chunks_(ib).desc_      = mdarray<int, 2>(4, na);
        beta_chunks_(ib).atom_pos_  = mdarray<double, 2>(3, na);

        int num_beta{0};
        for (int i = 0; i < na; i++) {
            /* global index of atom by local index and chunk */
            int ia = spl_beta_chunks.global_index(i, ib);
            auto pos = unit_cell_.atom(ia).position();
            auto& type = unit_cell_.atom(ia).type();
            /* atom fractional coordinates */
            for (int x: {0, 1, 2}) {
                beta_chunks_(ib).atom_pos_(x, i) = pos[x];
            }
            /* number of beta functions for atom */
            beta_chunks_(ib).desc_(beta_desc_idx::nbf, i) = type.mt_basis_size();
            /* offset in beta_gk*/
            beta_chunks_(ib).desc_(beta_desc_idx::offset, i) = num_beta;
            /* offset in beta_gk_t */
            beta_chunks_(ib).desc_(beta_desc_idx::offset_t, i) = type.offset_lo();
            /* global index of atom */
            beta_chunks_(ib).desc_(beta_desc_idx::ia, i) = ia;

            num_beta += type.mt_basis_size();
        }
        /* number of beta-projectors in this chunk */
        beta_chunks_(ib).num_beta_ = num_beta;
        beta_chunks_(ib).offset_ = offset_in_beta_gk;
        offset_in_beta_gk += num_beta;

        if (pu_ == GPU) {
            beta_chunks_[ib].desc_.allocate(memory_t::device);
            beta_chunks_[ib].desc_.copy<memory_t::host, memory_t::device>();
            beta_chunks_[ib].atom_pos_.allocate(memory_t::device);
            beta_chunks_[ib].atom_pos_.copy<memory_t::host, memory_t::device>();
        }
    }

    max_num_beta_ = 0;
    for (int ib = 0; ib < num_beta_chunks; ib++) {
        max_num_beta_ = std::max(max_num_beta_, beta_chunks_(ib).num_beta_);
    }

    /* total number of beta projectors over all atom types */
    num_beta_t_ = 0;
    for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
        num_beta_t_ += unit_cell_.atom_type(iat).mt_lo_basis_size();
    }
}

/// Point beta_gk_ at the columns of beta_gk_a_ belonging to one chunk
/// (and, on GPU, assemble the chunk's projectors into the device buffer).
inline void Beta_projectors::generate(int chunk__)
{
    PROFILE("sirius::Beta_projectors::generate");

    if (pu_ == CPU) {
        /* non-owning view into beta_gk_a_ */
        beta_gk_ = mdarray<double_complex, 2>(&beta_gk_a_(0, beta_chunk(chunk__).offset_),
                                              num_gkvec_loc_, beta_chunk(chunk__).num_beta_);
    }
#ifdef __GPU
    if (pu_ == GPU) {
        beta_gk_ = mdarray<double_complex, 2>(&beta_gk_a_(0, beta_chunk(chunk__).offset_),
                                              beta_gk_gpu_.at<GPU>(),
                                              num_gkvec_loc_, beta_chunk(chunk__).num_beta_);
        auto& desc = beta_chunk(chunk__).desc_;
        create_beta_gk_gpu(beta_chunk(chunk__).num_atoms_,
                           num_gkvec_loc_,
                           desc.at<GPU>(),
                           beta_gk_t_.at<GPU>(),
                           gkvec_coord_.at<GPU>(),
                           beta_chunk(chunk__).atom_pos_.at<GPU>(),
                           beta_gk_.at<GPU>());
    }
#endif
}

/// Complex inner product <beta|phi> for one chunk; result (2 doubles per entry) is stored in beta_phi
/// and allreduced over the communicator.
/// NOTE(review): the grow-path allocate(memory_t::device) is not guarded by #ifdef __GPU here,
/// unlike the inner<double> specialization below — confirm this compiles in host-only builds.
template<>
inline void Beta_projectors::inner<double_complex>(int chunk__, wave_functions& phi__, int idx0__, int n__,
                                                   mdarray<double_complex, 2> &beta_gk, mdarray<double, 1> &beta_phi)
{
    PROFILE("sirius::Beta_projectors::inner");

    assert(num_gkvec_loc_ == phi__.pw_coeffs().num_rows_loc());

    int nbeta = beta_chunk(chunk__).num_beta_;

    /* grow the result buffer if needed (2 doubles per complex entry) */
    if (static_cast<size_t>(nbeta * n__) > beta_phi.size()) {
        beta_phi = mdarray<double, 1>(2 * nbeta * n__);
        if (pu_ == GPU) {
            beta_phi.allocate(memory_t::device);
        }
    }

    switch (pu_) {
        case CPU: {
            /* compute <beta|phi> */
            linalg<CPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc_,
                              beta_gk.at<CPU>(), num_gkvec_loc_,
                              phi__.pw_coeffs().prime().at<CPU>(0, idx0__), phi__.pw_coeffs().prime().ld(),
                              (double_complex*)beta_phi.at<CPU>(), nbeta);
            break;
        }
        case GPU: {
#ifdef __GPU
            linalg<GPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc_,
                              beta_gk.at<GPU>(), num_gkvec_loc_,
                              phi__.pw_coeffs().prime().at<GPU>(0, idx0__), phi__.pw_coeffs().prime().ld(),
                              (double_complex*)beta_phi.at<GPU>(), nbeta);
            beta_phi.copy_to_host(2 * nbeta * n__);
#else
            TERMINATE_NO_GPU
#endif
            break;
        }
    }

    /* sum the partial products over all ranks that share the G+k vectors */
    comm_.allreduce(beta_phi.at<CPU>(), 2 * nbeta * n__);

    if (pu_ == GPU) {
        beta_phi.copy<memory_t::host, memory_t::device>(2 * nbeta * n__);
    }

    if (unit_cell_.parameters().control().print_checksum_) {
        auto cs = mdarray<double, 1>(beta_phi.at<CPU>(), 2 * nbeta * n__).checksum();
        if (comm_.rank() == 0) {
            DUMP("checksum(beta_phi) : %18.10f", cs);
        }
    }
}

/// Real (gamma-point) inner product: treats the complex arrays as real with doubled leading dimension;
/// 2*Re(sum) is computed by the GEMM and the doubly-counted G=0 term is subtracted with a rank-1 update.
template<>
inline void Beta_projectors::inner<double>(int chunk__, wave_functions& phi__, int idx0__, int n__,
                                           mdarray<double_complex, 2> &beta_gk, mdarray<double, 1> &beta_phi)
{
    PROFILE("sirius::Beta_projectors::inner");

    assert(num_gkvec_loc_ == phi__.pw_coeffs().num_rows_loc());

    int nbeta = beta_chunk(chunk__).num_beta_;

    if (static_cast<size_t>(nbeta * n__) > beta_phi.size()) {
        beta_phi = mdarray<double, 1>(nbeta * n__);
#ifdef __GPU
        if (pu_ == GPU) {
            beta_phi.allocate(memory_t::device);
        }
#endif
    }

    double a  = 2; /* factor 2: Re(x*y) summed over +G and -G */
    double a1 = -1; /* to remove the double-counted G=0 contribution */
    double b  = 0;

    switch (pu_) {
        case CPU: {
            /* compute <beta|phi> */
            linalg<CPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc_,
                              a, (double*)beta_gk.at<CPU>(), 2 * num_gkvec_loc_,
                              (double*)phi__.pw_coeffs().prime().at<CPU>(0, idx0__), 2 * phi__.pw_coeffs().prime().ld(),
                              b, beta_phi.at<CPU>(), nbeta);
            if (comm_.rank() == 0) {
                /* subtract one extra G=0 contribution */
                linalg<CPU>::ger(nbeta, n__, a1,
                                 (double*)&beta_gk(0, 0), 2 * num_gkvec_loc_,
                                 (double*)phi__.pw_coeffs().prime().at<CPU>(0, idx0__), 2 * phi__.pw_coeffs().prime().ld(),
                                 &beta_phi[0], nbeta);
            }
            break;
        }
        case GPU: {
#ifdef __GPU
            linalg<GPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc_,
                              &a, (double*)beta_gk.at<GPU>(), 2 * num_gkvec_loc_,
                              (double*)phi__.pw_coeffs().prime().at<GPU>(0, idx0__), 2 * phi__.pw_coeffs().prime().ld(),
                              &b, beta_phi.at<GPU>(), nbeta);
            if (comm_.rank() == 0) {
                /* subtract one extra G=0 contribution */
                linalg<GPU>::ger(nbeta, n__, &a1,
                                 (double*)beta_gk.at<GPU>(0, 0), 2 * num_gkvec_loc_,
                                 (double*)phi__.pw_coeffs().prime().at<GPU>(0, idx0__), 2 * phi__.pw_coeffs().prime().ld(),
                                 beta_phi.at<GPU>(), nbeta);
            }
            beta_phi.copy_to_host(nbeta * n__);
#else
            TERMINATE_NO_GPU
#endif
            break;
        }
    }

    comm_.allreduce(beta_phi.at<CPU>(), nbeta * n__);

#ifdef __GPU
    if (pu_ == GPU) {
        beta_phi.copy_to_device(nbeta * n__);
    }
#endif

#ifdef __PRINT_OBJECT_CHECKSUM
    {
        auto cs = mdarray<double, 1>(beta_phi.at<CPU>(), nbeta * n__).checksum();
        DUMP("checksum(beta_phi) : %18.10f", cs);
    }
#endif
}

} // namespace

#endif
FG_vector.h
#ifndef __FG_VECTOR_H__ #define __FG_VECTOR_H__ /* * Copyright 2014 Open Connectome Project (http://openconnecto.me) * Written by Da Zheng (zhengda1936@gmail.com) * * This file is part of FlashGraph. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <memory> #include <set> #include <fstream> #include "graph_engine.h" #include "stat.h" /** * \brief FlashGraph vector that provides several parallelized methods * when compared to an STL-vector. <br> * **NOTE**: Not an STL-compatible data structure. This vector is also * ideally used with numeric data types. <br> * Methods marked with the keyword **parallel** are parallelized implementations. */ template<class T> class FG_vector { // TODO I might need to split the vector into partitions. std::vector<T> eles; FG_vector(graph_engine::ptr graph) { eles.resize(graph->get_num_vertices()); } FG_vector(size_t size) { eles.resize(size); } public: typedef typename std::shared_ptr<FG_vector<T> > ptr; /** Smart pointer for object access */ /** * \brief Create a vector of the length the same as the number of vertices * in the graph. An object of this * class should be created using this or the `create(size_t size)` * method. * \param graph A shared pointer to a graph engine object. This is generally * the graph for which you are creating the vector. */ static ptr create(graph_engine::ptr graph) { return ptr(new FG_vector<T>(graph)); } /** * \brief Create a vector of the specified length. 
An object of this * class should be created using this or the `create(graph_engine::ptr graph)` * method. * \param size The length of the vector you desire. */ static ptr create(size_t size) { return ptr(new FG_vector<T>(size)); } /** * \brief Initialize the vector a single value as specified by parameter 1. * * \param v The initialization parameter for the vector data. * **parallel** */ void init(T v) { #pragma omp parallel for for (size_t i = 0; i < eles.size(); i++) eles[i] = v; } /** * \brief Equivalent to += operator. Element by element * addition of one `FG_vector` to another. * \param other An `FG_vector` smart pointer object. * **parallel** * */ void plus_eq(FG_vector<T>::ptr other) { assert(get_size() == other->get_size()); for (size_t i = 0; i < get_size(); i++) { eles[i] += other->get(i); } } /** * \brief Assign a value `num` many times to the vector. * \param num The number of elements to assign. * \param val The value a user wnats to assign to vector positions. */ void assign(size_t num, T val) { eles.assign(num, val); } /** * \brief Make a shallow copy of the vector. * \param other An `FG_vector` smart pointer. * **paralel** */ void shallow_copy(FG_vector<T>::ptr other) { assert(this->get_size() == other->get_size()); #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) { this->eles[i] = other->eles[i]; } } template<class T1> void copy_to(T1 *arr, size_t size) { size_t num = std::min(size, eles.size()); for (size_t i = 0; i < num; i++) arr[i] = eles[i]; } /** * \brief Check for equality between two `FG_vector`s element by * element. * \param other An `FG_vector` smart pointer. 
*/ // TODO DM: Make parallel / smarter bool eq_all(FG_vector<T>::ptr other) { return std::equal(this->eles.begin(), this->eles.end(), other->eles.begin()); } void init_rand(long max, unsigned int seed = 0) { if (seed > 0) srandom(seed); if (max >= std::numeric_limits<T>::max()) max = std::numeric_limits<T>::max(); #pragma omp parallel for for (size_t i = 0; i < eles.size(); i++) eles[i] = random(); } /** * \brief Populate an [STL set](http://www.cplusplus.com/reference/set/set/) * with the unique elements in the vector. All duplicates are ignored. * * \param set The *empty* STL set that will be populated with unique vector members. * */ void unique(std::set<T> &set) const { // TODO we need a parallel implementation. assert(set.empty()); // FIXME: `new` a shared/unique ptr & remove param BOOST_FOREACH(T v, eles) { set.insert(v); } } /** * \brief Count the number of unique items in the vector using a * count map. * \param map An *empty* `count_map` object that is used to count * the number of unique elements in the vector. * */ void count_unique(count_map<T> &map) const { // TODO we need a parallel implementation. assert(map.get_size() == 0); // FIXME: `new` a shared/unique ptr & remove param BOOST_FOREACH(T v, eles) { map.add(v); } } /** * \brief Get the number of elements contained in the vector. * * \return The number of elements in the vector */ size_t get_size() const { return eles.size(); } /** * \brief Get a pointer to the memory array used internally by * the vector to store its owned elements. * \return A pointer the underlying data memory array. * */ T *get_data() { return eles.data(); } /** * \brief Const method to get a pointer to the memory array * used internally by the vector to store its owned elements. * \return A const pointer the underlying data memory array. * * */ const T*get_data() const { return eles.data(); } /** * \brief Compute the [dot product](http://en.wikipedia.org/wiki/Dot_product) * of two FG vectors. 
<br> * **parallel** * * \return A value of data type `T` value that is the dot product. */ T dot_product(const FG_vector<T> &other) const { assert(this->get_size() == other.get_size()); T ret = 0; #pragma omp parallel for reduction(+:ret) for (size_t i = 0; i < get_size(); i++) ret += get(i) * other.get(i); return ret; } /** * \brief Compute the * [L2 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) * (also know as Euclidean distance) of a vector. <br> * **parallel** * * \return An object of type `T` with the value of the L2 norm. */ T norm2() const { T ret = 0; #pragma omp parallel for reduction(+:ret) for (size_t i = 0; i < get_size(); i++) ret += get(i) * get(i); return sqrt(ret); } /** * \brief Compute the * [L1 Norm](http://en.wikipedia.org/wiki/Norm_(mathematics)#Taxicab_norm_or_Manhattan_norm) * (also Taxicab norm) of an FG_vector. <br> * **parallel** * * \return An object of type `T` with the L1 norm. */ T norm1() const { T ret = 0; #pragma omp parallel for reduction(+:ret) for (size_t i = 0; i < get_size(); i++) ret += fabs(get(i)); return ret; } /** * \brief Compute the sum of all elements in the vector. <br> * If the type is integer, the sum can overflow. * **parallel** * \return The sum of all items in the vector. */ T sum() const { return sum<T>(); } /** * \brief Compute the sum of all elements in the vector. <br> * This sum() allows users to specify the type of the result, so users * can avoid integer overflow. * **parallel** * \return The sum of all items in the vector. */ template<class ResType> ResType sum() const { struct identity_func { ResType operator()(T v) { return v; } }; return aggregate<identity_func, ResType>(identity_func()); } template<class Func, class ResType> ResType aggregate(Func func) const { ResType ret = 0; #pragma omp parallel for reduction(+:ret) for (size_t i = 0; i < get_size(); i++) ret += func(eles[i]); return ret; } /** * \brief Find the maximal value in the vector and return its value. 
* \return The maximal value in the vector. */ T max() const { return max_val_loc().first; } /** * \brief Find the maximal value in the vector and return its value * and its location. * \return A pair that contains the maximal value and its location * in the vector. */ std::pair<T, off_t> max_val_loc() const { T ret = std::numeric_limits<T>::min(); off_t idx = 0; for (size_t i = 0; i < get_size(); i++) { if (ret < get(i)) { ret = get(i); idx = i; } } return std::pair<T, off_t>(ret, idx); } void max_val_locs(size_t num, std::vector<std::pair<T, off_t> > &pairs) const { typedef std::pair<T, off_t> val_loc_t; struct comp_val { bool operator()(const val_loc_t &v1, const val_loc_t &v2) { return v1.first > v2.first; } }; std::priority_queue<val_loc_t, std::vector<val_loc_t>, comp_val> queue; for (size_t i = 0; i < get_size(); i++) { T val = get(i); queue.push(val_loc_t(val, i)); if (queue.size() > num) queue.pop(); } while (!queue.empty()) { val_loc_t pair = queue.top(); queue.pop(); pairs.push_back(pair); } } /** * \brief Find the index with the minmimal value in the vector and * return its value. * \return The minimal value in the vector. */ T min() const { T ret = std::numeric_limits<T>::max(); for (size_t i = 0; i < get_size(); i++) ret = std::min(get(i), ret); return ret; } /** * \brief Find the index with the minimal value in the vector and * return *the index*. * \return The minimal index value in the vector. */ size_t argmin() { typename std::vector<T>::iterator res = std::min_element(eles.begin(), eles.end()); size_t ret = std::distance(eles.begin(), res); return ret; } /** * \brief Serial element-wise print of the vector. * **Not intended for very large vectors** */ void print() { std::cout << "["; for (vsize_t i=0; i < get_size(); i++) { std::cout << " " << get(i); } std::cout << " ]\n\n"; } /** * \brief Write the space separated vector to file. * \param fn The file name you wish written to file. 
*/ void to_file(std::string fn) { std::ofstream f; f.open(fn); for (vsize_t i=0; i < get_size(); i++) { f << get(i) << " "; } f.close(); } /** * \brief In place division of vector by a single value. * \param v The value by which you want the array divided. * **parallel** */ void div_by_in_place(T v) { #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) eles[i] /= v; } /** * \brief element-wise merge with another vector and store the result * in this vector. * \param vec The vector that you want to merge with. * \param func The operator that you want to perform on each pair of * elements. */ template<class MergeFunc, class VecType> void merge_in_place(typename FG_vector<VecType>::ptr vec, MergeFunc func) { assert(this->get_size() == vec->get_size()); #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) eles[i] = func(eles[i], vec->get(i)); } /** * \brief In place element-wise addition by another vector. * \param vec The vector by which you want to add to this vector. * **parallel** */ void add_in_place(FG_vector<T>::ptr vec) { struct add_func { T operator()(const T &v1, const T &v2) { return v1 + v2; } }; merge_in_place<add_func, T>(vec, add_func()); } /** * \brief In place subtraction of the vector by another vector. * \param vec The vector by which you want the array to be subtracted. * **parallel** */ void subtract_in_place(const FG_vector<T>::ptr &vec) { struct sub_func { T operator()(const T &v1, const T &v2) { return v1 - v2; } }; merge_in_place<sub_func, T>(vec, sub_func()); } /** * \brief Normalize vector using an Lx form. * **parallel** */ void normalize(int type) { T norm; switch(type) { case 2: norm = norm2(); break; case 1: norm = norm1(); break; default: ABORT_MSG("normalize on wrong type"); } div_by_in_place(norm); } /** * \brief Apply a function to every element in an FG_vector. * * \param func A user-defined function. * \param output The FG_vector that you want to apply the function to. 
* * **parallel** */ template<class ApplyFunc> void apply(ApplyFunc func, FG_vector<T> &output) { #pragma omp parallel for for (size_t i = 0; i < get_size(); i++) output.set(i, func(eles[i])); } // TODO these interfaces assume shared memory. /** * Set a value of an index in the vector. * * **NOTE:** This function assumes a shared memory environment. * \param id The index where value is being set. * \param v The value that the index will be set to. */ void set(vertex_id_t id, const T &v) { eles[id] = v; } /** * \brief Const get the value of a particular index. * \param id The index of the vector from where you want a value. * \return The value requested by param 1 * */ const T &get(vertex_id_t id) const { return eles[id]; } /** * \brief Non-const get the value of a particular index. * \param id The index of the vector from where you want a value. * \return The value requested by param 1 * */ T &get(vertex_id_t id) { return eles[id]; } log_histogram log_hist(int power) const { T max_v = max(); int num_buckets = ceil(log(max_v) / log(power)); log_histogram hist(std::max(num_buckets, 1)); for (size_t i = 0; i < get_size(); i++) { hist.add_value(eles[i]); } return hist; } }; /** * \brief Apply a user defined function to multipl FG_vectors. * **parallel** * \param inputs A vector of FG_vectors that are the inputs. * \param output A FG_vector that are the outputs. * \param apply The user-defined function that will be applied to all vecotors. */ template<class T, class ApplyFunc> void multi_vec_apply(const std::vector<typename FG_vector<T>::ptr> &inputs, typename FG_vector<T>::ptr output, ApplyFunc apply) { for (size_t i = 0; i < inputs.size(); i++) assert(output->get_size() == inputs[i]->get_size()); #pragma omp parallel for for (size_t i = 0; i < output->get_size(); i++) output->set(i, apply(i, inputs)); } #endif
zSchCompUdt-2Ddynamic.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief This file contains the main loop of pdgstrf which involves rank k * update of the Schur complement. * Uses 2D partitioning for the scatter phase. * * <pre> * -- Distributed SuperLU routine (version 5.4) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * * Modified: * September 14, 2017 * - First gather U-panel, then depending on "ldu" (excluding leading zeros), * gather only trailing columns of the L-panel corresponding to the nonzero * of U-rows. * - Padding zeros for nice dimensions of GEMM. * * June 1, 2018 add parallel AWPM pivoting; add back arrive_at_ublock() */ #define SCHEDULE_STRATEGY guided /* * Buffers: * [ lookAhead_L_buff | Remain_L_buff ] : stores the gathered L-panel * (A matrix in C := A*B ) * bigU : stores the U-panel (B matrix in C := A*B) * bigV : stores the block GEMM result (C matrix in C := A*B) */ if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */ int cum_nrow = 0; /* cumulative number of nonzero rows in L(:,k) */ int temp_nbrow; /* nonzero rows in current block L(i,k) */ lptr = lptr0; luptr = luptr0; int Lnbrow, Rnbrow; /* number of nonzero rows in look-ahead window, and remaining part. */ /******************************************************************* * Separating L blocks into the top part within look-ahead window * and the remaining ones. *******************************************************************/ int lookAheadBlk=0, RemainBlk=0; tt_start = SuperLU_timer_(); /* Sherry -- can this loop be threaded?? */ /* Loop through all blocks in L(:,k) to set up pointers to the start * of each block in the data arrays. 
* - lookAheadFullRow[i] := number of nonzero rows from block 0 to i * - lookAheadStRow[i] := number of nonzero rows before block i * - lookAhead_lptr[i] := point to the start of block i in L's index[] * - (ditto Remain_Info[i]) */ for (int i = 0; i < nlb; ++i) { ib = lsub[lptr]; /* Block number of L(i,k). */ temp_nbrow = lsub[lptr+1]; /* Number of full rows. */ int look_up_flag = 1; /* assume ib is outside look-up window */ for (int j = k0+1; j < SUPERLU_MIN (k0 + num_look_aheads+2, nsupers ); ++j) { if ( ib == perm_c_supno[j] ) { look_up_flag = 0; /* flag ib within look-up window */ break; /* Sherry -- can exit the loop?? */ } } if ( look_up_flag == 0 ) { /* ib is within look-up window */ if (lookAheadBlk==0) { lookAheadFullRow[lookAheadBlk] = temp_nbrow; } else { lookAheadFullRow[lookAheadBlk] = temp_nbrow + lookAheadFullRow[lookAheadBlk-1]; } lookAheadStRow[lookAheadBlk] = cum_nrow; lookAhead_lptr[lookAheadBlk] = lptr; lookAhead_ib[lookAheadBlk] = ib; lookAheadBlk++; } else { /* ib is not in look-up window */ if ( RemainBlk==0 ) { Remain_info[RemainBlk].FullRow = temp_nbrow; } else { Remain_info[RemainBlk].FullRow = temp_nbrow + Remain_info[RemainBlk-1].FullRow; } RemainStRow[RemainBlk] = cum_nrow; // Remain_lptr[RemainBlk] = lptr; Remain_info[RemainBlk].lptr = lptr; // Remain_ib[RemainBlk] = ib; Remain_info[RemainBlk].ib = ib; RemainBlk++; } cum_nrow += temp_nbrow; lptr += LB_DESCRIPTOR; /* Skip descriptor. */ lptr += temp_nbrow; /* Move to next block */ luptr += temp_nbrow; } /* for i ... set up pointers for all blocks in L(:,k) */ lptr = lptr0; luptr = luptr0; /* leading dimension of L look-ahead buffer, same as Lnbrow */ //int LDlookAhead_LBuff = lookAheadBlk==0 ? 0 :lookAheadFullRow[lookAheadBlk-1]; Lnbrow = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1]; /* leading dimension of L remaining buffer, same as Rnbrow */ //int LDRemain_LBuff = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow; Rnbrow = RemainBlk==0 ? 
0 : Remain_info[RemainBlk-1].FullRow; /* assert( cum_nrow == (LDlookAhead_LBuff + LDRemain_LBuff) );*/ /* Piyush fix */ //int LDlookAhead_LBuff = lookAheadBlk==0? 0 : lookAheadFullRow[lookAheadBlk-1]; nbrow = Lnbrow + Rnbrow; /* total number of rows in L */ LookAheadRowSepMOP += 2*knsupc*(nbrow); /*********************************************** * Gather U blocks (AFTER LOOK-AHEAD WINDOW) * ***********************************************/ tt_start = SuperLU_timer_(); if ( nbrow > 0 ) { /* L(:,k) is not empty */ /* * Counting U blocks */ ldu = 0; /* Calculate ldu for U(k,:) after look-ahead window. */ ncols = 0; /* Total number of nonzero columns in U(k,:) */ int temp_ncols = 0; /* jj0 contains the look-ahead window that was updated in dlook_ahead_update.c. Now the search can continue from that point, not to start from block 0. */ #if 0 // Sherry comment out 5/21/208 /* Save pointers at location right after look-ahead window for later restart. */ iukp0 = iukp; rukp0 = rukp; #endif /* if ( iam==0 ) printf("--- k0 %d, k %d, jj0 %d, nub %d\n", k0, k, jj0, nub);*/ /* * Loop through all blocks in U(k,:) to set up pointers to the start * of each block in the data arrays, store them in Ublock_info[j] * for block U(k,j). */ for (j = jj0; j < nub; ++j) { /* jj0 starts after look-ahead window. */ temp_ncols = 0; #if 1 /* Cannot remove following call, since perm_u != Identity */ arrive_at_ublock( j, &iukp, &rukp, &jb, &ljb, &nsupc, iukp0, rukp0, usub, perm_u, xsup, grid ); #else jb = usub[iukp]; /* ljb = LBj (jb, grid); Local block number of U(k,j). */ nsupc = SuperSize(jb); iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */ #endif Ublock_info[j].iukp = iukp; Ublock_info[j].rukp = rukp; Ublock_info[j].jb = jb; /* if ( iam==0 ) printf("j %d: Ublock_info[j].iukp %d, Ublock_info[j].rukp %d," "Ublock_info[j].jb %d, nsupc %d\n", j, Ublock_info[j].iukp, Ublock_info[j].rukp, Ublock_info[j].jb, nsupc); */ /* Prepare to call GEMM. 
*/ jj = iukp; for (; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { ++temp_ncols; if ( segsize > ldu ) ldu = segsize; } } Ublock_info[j].full_u_cols = temp_ncols; ncols += temp_ncols; #if 0 // Sherry comment out 5/31/2018 */ /* Jump number of nonzeros in block U(k,jj); Move to block U(k,j+1) in nzval[] array. */ rukp += usub[iukp - 1]; iukp += nsupc; #endif } /* end for j ... compute ldu & ncols */ /* Now doing prefix sum on full_u_cols. * After this, full_u_cols is the number of nonzero columns * from block 0 to block j. */ for ( j = jj0+1; j < nub; ++j) { Ublock_info[j].full_u_cols += Ublock_info[j-1].full_u_cols; } /* Padding zeros to make {m,n,k} multiple of vector length. */ jj = 8; //n; if (gemm_padding > 0 && Rnbrow > jj && ncols > jj && ldu > jj) { gemm_m_pad = Rnbrow + (Rnbrow % GEMM_PADLEN); gemm_n_pad = ncols + (ncols % GEMM_PADLEN); //gemm_n_pad = ncols; //gemm_k_pad = ldu + (ldu % GEMM_PADLEN); gemm_k_pad = ldu; for (i = Rnbrow; i < gemm_m_pad; ++i) // padding A matrix for (j = 0; j < gemm_k_pad; ++j) Remain_L_buff[i + j*gemm_m_pad] = zero; for (i = 0; i < Rnbrow; ++i) for (j = ldu; j < gemm_k_pad; ++j) Remain_L_buff[i + j*gemm_m_pad] = zero; for (i = ldu; i < gemm_k_pad; ++i) // padding B matrix for (j = 0; j < gemm_n_pad; ++j) bigU[i + j*gemm_k_pad] = zero; for (i = 0; i < ldu; ++i) for (j = ncols; j < gemm_n_pad; ++j) bigU[i + j*gemm_k_pad] = zero; } else { gemm_m_pad = Rnbrow; gemm_n_pad = ncols; gemm_k_pad = ldu; } tempu = bigU; /* buffer the entire row block U(k,:) */ /* Gather U(k,:) into buffer bigU[] to prepare for GEMM */ #ifdef _OPENMP #pragma omp parallel for firstprivate(iukp, rukp) \ private(j,tempu, jb, nsupc,ljb,segsize, lead_zero, jj, i) \ default (shared) schedule(SCHEDULE_STRATEGY) #endif for (j = jj0; j < nub; ++j) { /* jj0 starts after look-ahead window. 
*/ if (j==jj0) tempu = bigU; //else tempu = bigU + ldu * Ublock_info[j-1].full_u_cols; else tempu = bigU + gemm_k_pad * Ublock_info[j-1].full_u_cols; /* == processing each of the remaining columns in parallel == */ #if 0 /* Can remove following call, since search was already done. */ arrive_at_ublock(j, &iukp, &rukp, &jb, &ljb, &nsupc, iukp0, rukp0, usub,perm_u, xsup, grid); #else iukp = Ublock_info[j].iukp; rukp = Ublock_info[j].rukp; jb = Ublock_info[j].jb; nsupc = SuperSize (jb ); #endif /* Copy from U(k,j) to tempu[], padding zeros. */ for (jj = iukp; jj < iukp+nsupc; ++jj) { segsize = klst - usub[jj]; if ( segsize ) { lead_zero = ldu - segsize; for (i = 0; i < lead_zero; ++i) tempu[i] = zero; //tempu += lead_zero; #if (_OPENMP>=201307) #pragma omp simd #endif for (i = 0; i < segsize; ++i) tempu[i+lead_zero] = uval[rukp+i]; rukp += segsize; tempu += gemm_k_pad; } } } /* parallel for j = jj0 .. nub */ #if 0 if (ldu==0) printf("[%d] .. k0 %d, before updating: ldu %d, Lnbrow %d, Rnbrow %d, ncols %d\n",iam,k0,ldu,Lnbrow,Rnbrow, ncols); fflush(stdout); #endif GatherMOP += 2*ldu*ncols; } /* end if (nbrow>0), end gather U blocks */ GatherUTimer += SuperLU_timer_() - tt_start; int jj_cpu = nub; /* limit between CPU and GPU */ int thread_id; /*tempv = bigV;*/ /********************** * Gather L blocks * **********************/ tt_start = SuperLU_timer_(); /* Loop through the look-ahead blocks to copy Lval into the buffer */ #ifdef _OPENMP #pragma omp parallel for private(j,jj,tempu,tempv) default (shared) #endif for (i = 0; i < lookAheadBlk; ++i) { int StRowDest, temp_nbrow; if ( i==0 ) { StRowDest = 0; temp_nbrow = lookAheadFullRow[0]; } else { StRowDest = lookAheadFullRow[i-1]; temp_nbrow = lookAheadFullRow[i]-lookAheadFullRow[i-1]; } int StRowSource = lookAheadStRow[i]; /* Now copying one block into L lookahead buffer */ /* #pragma omp parallel for (gives slow down) */ // for (int j = 0; j < knsupc; ++j) { for (j = knsupc-ldu; j < knsupc; ++j) { /* skip leading 
columns corresponding to zero U rows */ #if 1 /* Better let compiler generate memcpy or vectorized code. */ //tempu = &lookAhead_L_buff[StRowDest + j*LDlookAhead_LBuff]; //tempu = &lookAhead_L_buff[StRowDest + j * Lnbrow]; tempu = &lookAhead_L_buff[StRowDest + (j - (knsupc-ldu)) * Lnbrow]; tempv = &lusup[luptr+j*nsupr + StRowSource]; #if (_OPENMP>=201307) #pragma omp simd #endif for (jj = 0; jj < temp_nbrow; ++jj) tempu[jj] = tempv[jj]; #else //memcpy(&lookAhead_L_buff[StRowDest + j*LDlookAhead_LBuff], memcpy(&lookAhead_L_buff[StRowDest + (j - (knsupc-ldu)) * Lnbrow], &lusup[luptr+j*nsupr + StRowSource], temp_nbrow * sizeof(doublecomplex) ); #endif } /* end for j ... */ } /* parallel for i ... gather Lval blocks from lookahead window */ /* Loop through the remaining blocks to copy Lval into the buffer */ #ifdef _OPENMP #pragma omp parallel for private(i,j,jj,tempu,tempv) default (shared) \ schedule(SCHEDULE_STRATEGY) #endif for (int i = 0; i < RemainBlk; ++i) { int StRowDest, temp_nbrow; if ( i==0 ) { StRowDest = 0; temp_nbrow = Remain_info[0].FullRow; } else { StRowDest = Remain_info[i-1].FullRow; temp_nbrow = Remain_info[i].FullRow - Remain_info[i-1].FullRow; } int StRowSource = RemainStRow[i]; /* Now copying a block into L remaining buffer */ // #pragma omp parallel for (gives slow down) // for (int j = 0; j < knsupc; ++j) { for (int j = knsupc-ldu; j < knsupc; ++j) { // printf("StRowDest %d Rnbrow %d StRowSource %d \n", StRowDest,Rnbrow ,StRowSource); #if 1 /* Better let compiler generate memcpy or vectorized code. 
*/ //tempu = &Remain_L_buff[StRowDest + j*LDRemain_LBuff]; //tempu = &Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * Rnbrow]; tempu = &Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * gemm_m_pad]; tempv = &lusup[luptr + j*nsupr + StRowSource]; #if (_OPENMP>=201307) #pragma omp simd #endif for (jj = 0; jj < temp_nbrow; ++jj) tempu[jj] = tempv[jj]; #else //memcpy(&Remain_L_buff[StRowDest + j*LDRemain_LBuff], memcpy(&Remain_L_buff[StRowDest + (j - (knsupc-ldu)) * gemm_m_pad], &lusup[luptr+j*nsupr + StRowSource], temp_nbrow * sizeof(doublecomplex) ); #endif } /* end for j ... */ } /* parallel for i ... copy Lval into the remaining buffer */ tt_end = SuperLU_timer_(); GatherLTimer += tt_end - tt_start; /************************************************************************* * Perform GEMM (look-ahead L part, and remain L part) followed by Scatter *************************************************************************/ tempu = bigU; /* setting to the start of padded U(k,:) */ if ( Lnbrow>0 && ldu>0 && ncols>0 ) { /* Both L(:,k) and U(k,:) nonempty */ /*************************************************************** * Updating blocks in look-ahead window of the LU(look-ahead-rows,:) ***************************************************************/ /* Count flops for total GEMM calls */ ncols = Ublock_info[nub-1].full_u_cols; flops_t flps = 8.0 * (flops_t)Lnbrow * ldu * ncols; LookAheadScatterMOP += 3 * Lnbrow * ncols; /* scatter-add */ schur_flop_counter += flps; stat->ops[FACT] += flps; LookAheadGEMMFlOp += flps; #ifdef _OPENMP #pragma omp parallel default (shared) private(thread_id) { thread_id = omp_get_thread_num(); /* Ideally, should organize the loop as: for (j = 0; j < nub; ++j) { for (lb = 0; lb < lookAheadBlk; ++lb) { L(lb,k) X U(k,j) -> tempv[] } } But now, we use collapsed loop to achieve more parallelism. 
Total number of block updates is: (# of lookAheadBlk in L(:,k)) X (# of blocks in U(k,:)) */ int i = sizeof(int); int* indirect_thread = indirect + (ldt + CACHELINE/i) * thread_id; int* indirect2_thread = indirect2 + (ldt + CACHELINE/i) * thread_id; #pragma omp for \ private (nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \ schedule(dynamic) #else /* not use _OPENMP */ thread_id = 0; int* indirect_thread = indirect; int* indirect2_thread = indirect2; #endif /* Each thread is assigned one loop index ij, responsible for block update L(lb,k) * U(k,j) -> tempv[]. */ for (int ij = 0; ij < lookAheadBlk*(nub-jj0); ++ij) { /* jj0 starts after look-ahead window. */ int j = ij/lookAheadBlk + jj0; int lb = ij%lookAheadBlk; /* Getting U block U(k,j) information */ /* unsigned long long ut_start, ut_end; */ int_t rukp = Ublock_info[j].rukp; int_t iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize(jb); int ljb = LBj (jb, grid); /* destination column block */ int st_col; int ncols; /* Local variable counts only columns in the block */ if ( j > jj0 ) { /* jj0 starts after look-ahead window. */ ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols; st_col = Ublock_info[j-1].full_u_cols; } else { ncols = Ublock_info[j].full_u_cols; st_col = 0; } /* Getting L block L(i,k) information */ int_t lptr = lookAhead_lptr[lb]; int ib = lookAhead_ib[lb]; int temp_nbrow = lsub[lptr+1]; lptr += LB_DESCRIPTOR; int cum_nrow = (lb==0 ? 
0 : lookAheadFullRow[lb-1]); /* Block-by-block GEMM in look-ahead window */ #if 0 i = sizeof(doublecomplex); doublecomplex* tempv1 = bigV + thread_id * (ldt*ldt + CACHELINE/i); #else doublecomplex* tempv1 = bigV + thread_id * (ldt*ldt); #endif #if ( PRNTlevel>= 1) if (thread_id == 0) tt_start = SuperLU_timer_(); gemm_max_m = SUPERLU_MAX(gemm_max_m, temp_nbrow); gemm_max_n = SUPERLU_MAX(gemm_max_n, ncols); gemm_max_k = SUPERLU_MAX(gemm_max_k, ldu); #endif #if defined (USE_VENDOR_BLAS) zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, //&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow, &lookAhead_L_buff[cum_nrow], &Lnbrow, &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1); #else zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha, //&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow, &lookAhead_L_buff[cum_nrow], &Lnbrow, &tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow); #endif #if (PRNTlevel>=1 ) if (thread_id == 0) { tt_end = SuperLU_timer_(); LookAheadGEMMTimer += tt_end - tt_start; tt_start = tt_end; } #endif if ( ib < jb ) { zscatter_u ( ib, jb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, lsub, usub, tempv1, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { #if 0 //#ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif zscatter_l ( ib, ljb, nsupc, iukp, xsup, klst, temp_nbrow, lptr, temp_nbrow, usub, lsub, tempv1, indirect_thread, indirect2_thread, Lrowind_bc_ptr, Lnzval_bc_ptr, grid ); #if 0 //#ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif } #if ( PRNTlevel>=1 ) if (thread_id == 0) LookAheadScatterTimer += SuperLU_timer_() - tt_start; #endif } /* end omp for ij = ... */ #ifdef _OPENMP } /* end omp parallel */ #endif } /* end if Lnbrow>0 ... look-ahead GEMM and scatter */ /*************************************************************** * Updating remaining rows and columns on CPU. 
***************************************************************/ ncols = jj_cpu==0 ? 0 : Ublock_info[jj_cpu-1].full_u_cols; if ( Rnbrow>0 && ldu>0 ) { /* There are still blocks remaining ... */ double flps = 8.0 * (double)Rnbrow * ldu * ncols; schur_flop_counter += flps; stat->ops[FACT] += flps; #if ( PRNTlevel>=1 ) RemainGEMM_flops += flps; gemm_max_m = SUPERLU_MAX(gemm_max_m, Rnbrow); gemm_max_n = SUPERLU_MAX(gemm_max_n, ncols); gemm_max_k = SUPERLU_MAX(gemm_max_k, ldu); tt_start = SuperLU_timer_(); /* printf("[%d] .. k0 %d, before large GEMM: %d-%d-%d, RemainBlk %d\n", iam, k0,Rnbrow,ldu,ncols,RemainBlk); fflush(stdout); assert( Rnbrow*ncols < bigv_size ); */ #endif /* calling aggregated large GEMM, result stored in bigV[]. */ #if defined (USE_VENDOR_BLAS) //zgemm_("N", "N", &Rnbrow, &ncols, &ldu, &alpha, zgemm_("N", "N", &gemm_m_pad, &gemm_n_pad, &gemm_k_pad, &alpha, //&Remain_L_buff[(knsupc-ldu)*Rnbrow], &Rnbrow, &Remain_L_buff[0], &gemm_m_pad, &bigU[0], &gemm_k_pad, &beta, bigV, &gemm_m_pad, 1, 1); #else //zgemm_("N", "N", &Rnbrow, &ncols, &ldu, &alpha, zgemm_("N", "N", &gemm_m_pad, &gemm_n_pad, &gemm_k_pad, &alpha, //&Remain_L_buff[(knsupc-ldu)*Rnbrow], &Rnbrow, &Remain_L_buff[0], &gemm_m_pad, &bigU[0], &gemm_k_pad, &beta, bigV, &gemm_m_pad); #endif #if ( PRNTlevel>=1 ) tt_end = SuperLU_timer_(); RemainGEMMTimer += tt_end - tt_start; #if ( PROFlevel>=1 ) //fprintf(fgemm, "%8d%8d%8d %16.8e\n", Rnbrow, ncols, ldu, // (tt_end - tt_start)*1e6); // time in microsecond //fflush(fgemm); gemm_stats[gemm_count].m = Rnbrow; gemm_stats[gemm_count].n = ncols; gemm_stats[gemm_count].k = ldu; gemm_stats[gemm_count++].microseconds = (tt_end - tt_start) * 1e6; #endif tt_start = SuperLU_timer_(); #endif #ifdef USE_VTUNE __SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores __itt_resume(); // start VTune, again use 2 underscores #endif /* Scatter into destination block-by-block. 
*/ #ifdef _OPENMP #pragma omp parallel default(shared) private(thread_id) { thread_id = omp_get_thread_num(); /* Ideally, should organize the loop as: for (j = 0; j < jj_cpu; ++j) { for (lb = 0; lb < RemainBlk; ++lb) { L(lb,k) X U(k,j) -> tempv[] } } But now, we use collapsed loop to achieve more parallelism. Total number of block updates is: (# of RemainBlk in L(:,k)) X (# of blocks in U(k,:)) */ int i = sizeof(int); int* indirect_thread = indirect + (ldt + CACHELINE/i) * thread_id; int* indirect2_thread = indirect2 + (ldt + CACHELINE/i) * thread_id; #pragma omp for \ private (j,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \ schedule(dynamic) #else /* not use _OPENMP */ thread_id = 0; int* indirect_thread = indirect; int* indirect2_thread = indirect2; #endif /* Each thread is assigned one loop index ij, responsible for block update L(lb,k) * U(k,j) -> tempv[]. */ for (int ij = 0; ij < RemainBlk*(jj_cpu-jj0); ++ij) { /* jj_cpu := nub, jj0 starts after look-ahead window. */ int j = ij / RemainBlk + jj0; /* j-th block in U panel */ int lb = ij % RemainBlk; /* lb-th block in L panel */ /* Getting U block U(k,j) information */ /* unsigned long long ut_start, ut_end; */ int_t rukp = Ublock_info[j].rukp; int_t iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize(jb); int ljb = LBj (jb, grid); int st_col; int ncols; if ( j>jj0 ) { ncols = Ublock_info[j].full_u_cols - Ublock_info[j-1].full_u_cols; st_col = Ublock_info[j-1].full_u_cols; } else { ncols = Ublock_info[j].full_u_cols; st_col = 0; } /* Getting L block L(i,k) information */ int_t lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr+1]; lptr += LB_DESCRIPTOR; int cum_nrow = (lb==0 ? 0 : Remain_info[lb-1].FullRow); /* tempv1 points to block(i,j) in bigV : LDA == Rnbrow */ //double* tempv1 = bigV + (st_col * Rnbrow + cum_nrow); Sherry doublecomplex* tempv1 = bigV + (st_col * gemm_m_pad + cum_nrow); /* Sherry */ // printf("[%d] .. 
before scatter: ib %d, jb %d, temp_nbrow %d, Rnbrow %d\n", iam, ib, jb, temp_nbrow, Rnbrow); fflush(stdout); /* Now scattering the block */ if ( ib < jb ) { zscatter_u ( ib, jb, nsupc, iukp, xsup, //klst, Rnbrow, /*** klst, temp_nbrow, Sherry */ klst, gemm_m_pad, /*** klst, temp_nbrow, Sherry */ lptr, temp_nbrow, /* row dimension of the block */ lsub, usub, tempv1, Ufstnz_br_ptr, Unzval_br_ptr, grid ); } else { zscatter_l( ib, ljb, nsupc, iukp, xsup, //klst, temp_nbrow, Sherry klst, gemm_m_pad, /*** temp_nbrow, Sherry */ lptr, temp_nbrow, /* row dimension of the block */ usub, lsub, tempv1, indirect_thread, indirect2_thread, Lrowind_bc_ptr,Lnzval_bc_ptr, grid ); } } /* end omp for (int ij =...) */ #ifdef _OPENMP } /* end omp parallel region */ #endif #if ( PRNTlevel>=1 ) RemainScatterTimer += SuperLU_timer_() - tt_start; #endif #ifdef USE_VTUNE __itt_pause(); // stop VTune __SSC_MARK(0x222); // stop SDE tracing #endif } /* end if Rnbrow>0 ... update remaining block */ } /* end if L(:,k) and U(k,:) are not empty */
tree.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! 
* \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = MaybeRoundToZero(output); } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Get upper bound leaf value of this tree model */ double GetUpperBoundValue() const; /*! * \brief Get lower bound leaf value of this tree model */ double GetLowerBoundValue() const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! 
* \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] * rate); internal_value_[i] = MaybeRoundToZero(internal_value_[i] * rate); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] * rate); shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_ - 1; ++i) { leaf_value_[i] = MaybeRoundToZero(leaf_value_[i] + val); internal_value_[i] = MaybeRoundToZero(internal_value_[i] + val); } leaf_value_[num_leaves_ - 1] = MaybeRoundToZero(leaf_value_[num_leaves_ - 1] + val); // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return true; } else { return false; } } inline static double MaybeRoundToZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return 0; } else { return fval; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); int NextLeafId() const { return num_leaves_; } private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= 
threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! * \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! 
\brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! 
\brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! 
\brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 
0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK_GE(max_depth_, 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } 
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
alloc_fail.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu // RUN: %libomptarget-compile-nvptx64-nvidia-cuda // RUN: %libomptarget-run-fail-nvptx64-nvidia-cuda 2>&1 \ // RUN: | %fcheck-nvptx64-nvidia-cuda // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory int main() { int arr[4] = {0, 1, 2, 3}; #pragma omp target data map(alloc: arr[0:2]) #pragma omp target data map(alloc: arr[1:2]) ; return 0; }
Network.h
#pragma once /** * Defines layer and network data types for the neural network. * Note that the result arrays returned(NUM_TYPE* type return values) must not be modified. **/ #include "Config.h" #include "Activation.h" #include "Layer.h" #include "Dataset.h" #include <cstring> #include <cassert> #include <iostream> namespace nn { /** * The neural network. * Composed of the layers, this class contains the operation for them including train and test(predict). */ class Network { public: class Builder { public: /** * Sets input size of the network. * This needs to call first, if not `load()`ing a full network; calling this method clears any layers added before. * @returns this, for chaining. */ Builder& input(unsigned int input_size) { delete_list(true); this->input_size = input_size; return *this; } /** * Adds a layer with the output neurons given. * Input size of the layer is set as the previous layer's output or `input()` size. * The activation function is Sigmoid by default, this can be modified by specifying a function in the template. * @throws std::invalid_argument when no `input()` is specified before. * @returns this, for chaining. */ template<typename A = activation::Sigmoid> Builder& addLayer(unsigned int neurons) { unsigned int last_size; if(tail) { last_size = tail->output_size; } else { last_size = input_size; } if(last_size == 0 || neurons == 0) { throw std::invalid_argument("Neuron count cannot be zero, maybe you missed the call to Builder::input()"); } Layer* layer = new LayerImpl<A>(last_size, neurons); layer->initialize_weights(); LayerList* list = new LayerList; list->layer = layer; list->output_size = neurons; list->next = NULL; if(tail) { tail->next = list; tail = list; } else { head = tail = list; } count++; return *this; } /** * Builds the network. * @throws std::length_error when `build()` is called with no layers added. * @returns The network built. 
*/ Network* build() { if (count <= 0) throw std::length_error("No layers present in the network definition!"); Layer** layers = new Layer*[count]; LayerList* curr = head; for(unsigned int i = 0; i < count && curr != NULL; i++, curr = curr->next) { layers[i] = curr->layer; } Network* net = new Network(count, layers, input_size, tail->output_size); //delete this; return net; } /** * Loads a network from stream. * This method can be called alone, or with other layers before or after the `load()` call. * By combining other layers, the network can be pre-trained per each layers. * @param input The input stream to read from. * @throws std::invalid_argument if the stream is not a valid network. * @throws std::runtime_error if the activation function, read from the stream, is unknown. * @throws std::length_error if the layers cannot be connected due to the output and input size mismatch. * @returns this, for chaining. */ Builder& load(std::istream& input) { char magic[6]; input.read(magic, 5); magic[5] = '\0'; if (input.fail() || strcmp(magic, "NeNet") != 0) throw std::invalid_argument("The input is not a network save file"); int layers; input.read((char*) &layers, sizeof(layers)); NUM_TYPE* weight_buf = NULL; int buf_size = -1; for (int i = 0; i < layers; i++) { char type; input.read(&type, sizeof(type)); assert(!input.fail()); int in, out; input.read((char*) &in, sizeof(in)); assert(!input.fail()); input.read((char*) &out, sizeof(out)); assert(!input.fail()); int weight_count; input.read((char*) &weight_count, sizeof(weight_count)); assert(!input.fail()); if (weight_count > buf_size) { NUM_TYPE* newbuf = new NUM_TYPE[weight_count]; delete[] weight_buf; weight_buf = newbuf; buf_size = weight_count; } input.read((char*) weight_buf, sizeof(NUM_TYPE) * weight_count); assert(!input.fail()); Layer* layer; switch(type) { case activation::types::Sigmoid: layer = new LayerImpl<activation::Sigmoid>(in, out); break; case activation::types::Tanh: layer = new 
LayerImpl<activation::Tanh>(in, out); break; case activation::types::HardSigmoid: layer = new LayerImpl<activation::HardSigmoid>(in, out); break; case activation::types::ReLU: layer = new LayerImpl<activation::ReLU>(in, out); break; case activation::types::LeakyReLU: layer = new LayerImpl<activation::LeakyReLU>(in, out); break; case activation::types::ELU: layer = new LayerImpl<activation::ELU>(in, out); break; default: throw std::runtime_error("Invalid activation function type!"); } layer->load_weights(weight_buf, weight_count); LayerList* list = new LayerList; list->layer = layer; list->output_size = out; list->next = NULL; if (tail) { if (tail->output_size != in) throw std::length_error("Last layer's output size doesn't match the new layer's input size!"); tail->next = list; tail = list; } else { input_size = in; head = tail = list; } count++; } delete[] weight_buf; return *this; } Builder& popLayer() { LayerList *prev = head; assert(prev != NULL); if(prev == tail) { delete_list(true); input_size = 0; } else { while (prev->next != tail) prev = prev->next; delete prev->next->layer; delete prev->next; count--; tail = prev; } return *this; } Builder() : head(NULL), tail(NULL), input_size(0), count(0) {} ~Builder() { delete_list(); } private: struct LayerList { Layer* layer; unsigned int output_size; LayerList* next; } *head, *tail; unsigned int input_size; unsigned int count; void delete_list(bool delete_layers = false) { if(!head) return; for(LayerList *curr = head; curr != NULL;) { LayerList* next = curr->next; if(delete_layers) delete curr->layer; delete curr; curr = next; } head = tail = NULL; count = 0; } }; /** * Trains the network with the given data batch of size `n`. * TODO: implement batch weight update, instead of one update per single data entry. * @param n Number of data to read from the `data` array. * @param data Data array used to train the network. 
*/ void train(unsigned int n, DataEntry* data) { #ifdef BATCH_TRAIN for(int i = 0; i < layer_count; i++) { layers[i]->clear_delta(); } #endif for (unsigned int i = 0; i < n; i++) { assert(data[i].data_count == inputs && data[i].label_count == outputs); /* Retrieve the result(f = output) of the layers */ results[0] = data[i].data; for (int l = 0; l < layer_count; l++) { results[l + 1] = layers[l]->forward(results[l], true); } /* Restore to pre-allocated [outputs] sized array. The pointer is changed during the backpropagation process */ NUM_TYPE* delta = delta_buf; /* Calculate delta for the output layer */ for (int j = 0; j < outputs; j++) { delta[j] = data[i].label[j] - results[layer_count][j]; } /* Backpropagate and get a new delta for the next('backward') layer. */ for (int l = layer_count - 1; l >= 0; l--) { delta = layers[l]->backward(delta); } #ifdef BATCH_TRAIN // Update after whole batch is applied } #endif /* Update weights with their optimizer */ #pragma omp parallel for for(int l = 0; l < layer_count; l++) { layers[l]->update_weights(results[l]); } #ifndef BATCH_TRAIN } #endif } /** * Predict using the given input, forward-propagated through the network. * @param data Input data. Asserts the length is `Network::inputs`. * @returns Predicted result, the length is same as `Network::outputs`. */ NUM_TYPE* predict(NUM_TYPE* data) { for(int i = 0; i < layer_count; i++) { data = layers[i]->forward(data); } return data; } ~Network() { delete[] delta_buf; delete[] results; for(int i = 0; i < layer_count; i++) { delete layers[i]; } delete[] layers; } /** * Writes the network to stream. * The saved network can be loaded by `Builder::load()`. 
* @param output Stream to dump this network */ void dump_network(std::ostream& output) { output.write("NeNet", 5); output.write((char*) &layer_count, sizeof(layer_count)); for (int i = 0; i < layer_count; i++) { char type = layers[i]->getActivationType(); int inputs = layers[i]->inputs; int outputs = layers[i]->outputs; output.write(&type, sizeof(type)); output.write((char*) &inputs, sizeof(inputs)); output.write((char*) &outputs, sizeof(outputs)); std::vector<NUM_TYPE> weights = layers[i]->dump_weights(); int size = weights.size(); output.write((char*) &size, sizeof(size)); output.write((char*) &weights[0], sizeof(NUM_TYPE) * size); } } const int layer_count; const int inputs, outputs; private: Layer** layers; NUM_TYPE** results; NUM_TYPE* delta_buf; Network(unsigned int layer_count, Layer** layers, unsigned int inputs, unsigned int outputs) : layers(layers), layer_count(layer_count), inputs(inputs), outputs(outputs), results(new NUM_TYPE*[layer_count + 1]), delta_buf(new NUM_TYPE[outputs]) {} }; }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LambdaMangleContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Lex/ModuleLoader.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include <deque> #include <string> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class 
ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; class MultiLevelTemplateArgumentList; class NamedDecl; class NonNullAttr; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class 
StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TargetAttributesSema; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) LLVM_DELETED_FUNCTION; void operator=(const Sema &) LLVM_DELETED_FUNCTION; mutable const TargetAttributesSema* TheTargetAttributesSema; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); static bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. 
If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return !Old->isHidden() || New->hasExternalLinkage(); } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. 
bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. OwningPtr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief A mapping from external names to the most recent /// locally-scoped extern "C" declaration with that name. 
/// /// This map contains external declarations introduced in local /// scopes, e.g., /// /// \code /// extern "C" void f() { /// void foo(int, int); /// } /// \endcode /// /// Here, the name "foo" will be associated with the declaration of /// "foo" within f. This name is not visible outside of /// "f". However, we still find it in two cases: /// /// - If we are declaring another global or extern "C" entity with /// the name "foo", we can find "foo" as a previous declaration, /// so that the types of this external declaration can be checked /// for compatibility. /// /// - If we would implicitly declare "foo" (e.g., due to a call to /// "foo" in C when no prototype or definition is visible), then /// we find this declaration of "foo" and complain that it is /// not visible. llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls; /// \brief Look for a locally scoped extern "C" declaration by the given name. llvm::DenseMap<DeclarationName, NamedDecl *>::iterator findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. 
DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the destructors seen during a class definition that had their /// exception spec computation delayed because it depended on an unparsed /// exception spec. SmallVector<CXXDestructorDecl*, 2> DelayedDestructorExceptionSpecs; /// \brief All the overriding destructors seen during a class definition /// (there could be multiple due to nested classes) that had their exception /// spec checks delayed, plus the overridden destructor. SmallVector<std::pair<const CXXDestructorDecl*, const CXXDestructorDecl*>, 2> DelayedDestructorExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD); LateTemplateParserCB *LateTemplateParser; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) { LateTemplateParser = LTP; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. 
class DelayedDiagnostics {
  /// \brief The current pool of diagnostics into which delayed
  /// diagnostics should go.
  ///
  /// Null when diagnostics are not being delayed at all.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(0) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != 0; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope.  Access and deprecation diagnostics will be
  /// collected in this pool.  The returned state snapshots the previous
  /// pool so the matching pop can restore it.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics.  This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed (CurPool becomes null).
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = 0;
    return state;
  }

  /// Undo a previous pushUndelayed().  Asserts that no pool was pushed
  /// (and left popped-without-restore) in between.
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == NULL);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;  // nulled once pop() has run
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  /// Push \p ContextToPush as the current declaration context, saving the
  /// previous context, its delayed-diagnostics state (diagnostics are
  /// undelayed inside), and the current CXXThisTypeOverride so all three
  /// can be restored on pop().
  ContextRAII(Sema &S, DeclContext *ContextToPush)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
  {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
  }

  /// Restore the saved state.  Idempotent: SavedContext is cleared so an
  /// explicit pop() followed by destruction restores only once.
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = 0;
  }

  ~ContextRAII() {
    pop();
  }
};

/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;

public:
  /// On entry: pushes \p DC as the declaration context (via ContextRAII),
  /// plus a fresh function scope and a PotentiallyEvaluated expression
  /// evaluation context; all are popped in reverse on destruction.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
    : S(S), SavedContext(S, DC)
  {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
  }

  ~SynthesizedFunctionScope() {
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. OwningPtr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. 
ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statment). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. 
ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for the lambda expression /// if the normal declaration context does not suffice, e.g., in a /// default function argument. Decl *LambdaContextDecl; /// \brief The context information used to mangle lambda expressions /// within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions. IntrusiveRefCntPtr<LambdaMangleContext> LambdaMangle; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *LambdaContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), LambdaContextDecl(LambdaContextDecl), LambdaMangle() { } /// \brief Retrieve the mangling context for lambdas. 
LambdaMangleContext &getLambdaMangleContext() { assert(LambdaContextDecl && "Need to have a lambda context declaration"); if (!LambdaMangle) LambdaMangle = new LambdaMangleContext; return *LambdaMangle; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> > UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    llvm::SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

// NOTE(review): a pair of method lists kept per selector; which element holds
// instance vs. factory methods is not established by this chunk -- confirm.
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

void ReadMethodPool(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);

/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema& S)
    : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
  ~FPContractStateRAII() {
    // Restore the saved fp_contract state on scope exit.
    S.FPFeatures.fp_contract = OldFPContractState;
  }
private:
  Sema& S;
  bool OldFPContractState : 1;
};

typedef llvm::MCAsmParserSemaCallback::InlineAsmIdentifierInfo
  InlineAsmIdentifierInfo;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = 0);
~Sema();

/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Simple accessors for the subsystems Sema holds references to.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
const TargetAttributesSema &getTargetAttributesSema() const;
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///\brief Registers an external source.
/// If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }
};

/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// \brief Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T) const;
std::string getFixItZeroLiteralForType(QualType T) const;

// Pass-through ownership wrappers: each returns its argument unchanged.
ExprResult Owned(Expr* E) { return E; }
ExprResult Owned(ExprResult R) { return R; }
StmtResult Owned(Stmt* S) { return S; }

void ActOnEndOfTranslationUnit();

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

// Function/block/lambda/captured-region scope management.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD,
                             sema::CapturedRegionScopeInfo::CapturedRegionKind K);
void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
                          const Decl *D = 0, const BlockExpr *blkExpr = 0);

/// Retrieve the innermost function scope (top of FunctionScopes stack).
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.back();
}

void PushCompoundScope();
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// \brief Retrieve the current lambda expression, if any.
sema::LambdaScopeInfo *getCurLambda();

/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = 0); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = 0); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. 
QualType BuildFunctionType(QualType T,
                           llvm::MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);

// Declarator -> type conversion.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);

/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = 0);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);

// Exception-specification checking.
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc,
    bool *MissingExceptionSpecification = 0,
    bool *MissingEmptyExceptionSpecification = 0,
    bool AllowNoexceptAllMatchWithNoSpec = false,
    bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Superset, SourceLocation SuperLoc,
    const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
    const FunctionProtoType *Target, SourceLocation TargetLoc,
    const FunctionProtoType *Source, SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  /// When true, diagnose() is expected to emit nothing.
  bool Suppressed;

  TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads: normalize each argument kind into a form that can
// be streamed into a diagnostic.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// TypeDiagnoser that binds one extra diagnostic argument.
/// DiagID == 0 means "suppressed" (emit nothing).
template<typename T1>
class BoundTypeDiagnoser1 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;

public:
  BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { }
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
  }

  virtual ~BoundTypeDiagnoser1() { }
};

/// TypeDiagnoser that binds two extra diagnostic arguments.
template<typename T1, typename T2>
class BoundTypeDiagnoser2 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;
  const T2 &Arg2;

public:
  BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1,
                      const T2 &Arg2)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
      Arg2(Arg2) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
  }

  virtual ~BoundTypeDiagnoser2() { }
};

/// TypeDiagnoser that binds three extra diagnostic arguments.
template<typename T1, typename T2, typename T3>
class BoundTypeDiagnoser3 : public TypeDiagnoser {
  unsigned DiagID;
  const T1 &Arg1;
  const T2 &Arg2;
  const T3 &Arg3;

public:
  BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1,
                      const T2 &Arg2, const T3 &Arg3)
    : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
      Arg2(Arg2), Arg3(Arg3) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed) return;
    S.Diag(Loc, DiagID)
      << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
  }

  virtual ~BoundTypeDiagnoser3() { }
};

// RequireCompleteType: require T to be complete at Loc, diagnosing with the
// given diagnoser or diagnostic ID plus bound arguments.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

template<typename T1>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template<typename T1, typename T2>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                         const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteType(Loc, T, Diagnoser);
}

// RequireCompleteExprType: same, keyed off an expression's type.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template<typename T1>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireCompleteExprType(E, Diagnoser);
}

template<typename T1, typename T2>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireCompleteExprType(E, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2, const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteExprType(E, Diagnoser);
}

// RequireLiteralType: require T to be a literal type at Loc.
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template<typename T1>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireLiteralType(Loc, T, Diagnoser);
}

template<typename T1, typename T2>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireLiteralType(Loc, T, Diagnoser);
}

template<typename T1, typename T2, typename T3>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                        const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;

/// Nonzero if we are currently parsing a function declarator. This is a counter
/// as opposed to a boolean so we can deal with nested function declarators
/// such as:
///     void f(void (*g)(), ...)
unsigned InFunctionDeclarator;

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = 0,
                       bool isClassName = false,
                       bool HasTrailingDot = false,
                       ParsedType ObjectType = ParsedType(),
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
bool DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType);

/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate); return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template; } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. 
/// When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
                                CXXScopeSpec &SS,
                                IdentifierInfo *&Name,
                                SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = 0);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND,
                                      const LookupResult &Previous,
                                      Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name,
                                  SourceLocation Loc);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

void ActOnStartFunctionDeclarator();
void ActOnEndFunctionDeclarator();
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

void checkVoidParamDecl(ParmVarDecl *Param);

bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                          bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnCXXForRangeDecl(Decl *D);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       Decl **Group,
                                       unsigned NumDecls);
DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
                                    bool TypeMayContainAuto = true);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(Decl **Group, unsigned NumDecls);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief Create an implicit import of the given module at the given /// source location. /// /// This routine is typically used for error recovery, when the entity found /// by name lookup is actually hidden within a module that we know about but /// the user has forgotten to import. void createImplicitModuleImport(SourceLocation Loc, Module *Mod); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation = false);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo &Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS,
               IdentifierInfo *Name, SourceLocation NameLoc,
               AttributeList *Attr, AccessSpecifier AS,
               SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart,
                                 Declarator &D, Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 AttributeList *MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo,
                          RecordDecl *Record, SourceLocation Loc,
                          bool Mutable, Expr *BitfieldWidth,
                          InClassInitStyle InitStyle,
                          SourceLocation TSSL,
                          AccessSpecifier AS, NamedDecl *PrevDecl,
                          Declarator *D = 0);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields,
                 SourceLocation LBrac, SourceLocation RBrac,
                 AttributeList *AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, Decl **Elements, unsigned NumElements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. 
If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param ExplicitInstantiationOrSpecialization When true, we are checking /// whether the declaration is in scope for the purposes of explicit template /// instantiation or specialization. The default is false. bool isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S = 0, bool ExplicitInstantiationOrSpecialization = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. 
Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. 
AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &OldDecls, bool OldDeclsWereHidden); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool OldIsHidden); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. 
bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = 0); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl 
*NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg ///< Value of a non-type template parameter. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); /// \brief Abstract base class used to diagnose problems that occur while /// trying to convert an expression to integral or enumeration type. class ICEConvertDiagnoser { public: bool Suppress; bool SuppressConversion; ICEConvertDiagnoser(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) { } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. 
virtual DiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ICEConvertDiagnoser() {} }; ExprResult ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE, ICEConvertDiagnoser &Diagnoser, bool AllowScopedEnumerations); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, TemplateArgumentListInfo *ExplicitTemplateArgs = 0); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, 
OverloadCandidateSet &CandidateSet); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr **Args, unsigned NumArgs, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, bool Operator, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = 0); FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair* Found = 0); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool 
Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, 
SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. 
In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); private: bool CppLookupName(LookupResult &R, Scope *S); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; public: /// \brief Look up a name, looking for a single declaration. 
Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRawAndTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, bool Operator, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool 
IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext = 0, bool EnteringContext = false, const ObjCObjectPointerType *OPT = 0); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool ExplicitInstantiationOrSpecialization); bool DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD, bool NonInheritable = true, bool Inheritable = true); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool NonInheritable = true, bool Inheritable = true, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = 0); bool CheckNoReturnAttr(const AttributeList &attr); void CheckAlignasUnderalignment(Decl *D); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method, bool &IncompleteImpl, unsigned DiagID); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl, ObjCInterfaceDecl *IDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckProtocolMethodDefs - This routine checks unimplemented /// methods declared in protocol, and those referenced by it. 
void CheckProtocolMethodDefs(SourceLocation ImpLoc, ObjCProtocolDecl *PDecl, bool& IncompleteImpl, const SelectorSet &InsMap, const SelectorSet &ClsMap, ObjCContainerDecl *CDecl); /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, const SelectorSet &InsMap); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// CollectImmediateProperties - This routine collects all properties in /// the class and its conforming protocols; but not those it its super class. void CollectImmediateProperties(ObjCContainerDecl *CDecl, llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap, llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = 0); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. 
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool warn, bool instance); public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. 
void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/false); } /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(0) { } // FIXME: The const_cast here is ugly. RValue references would make this // much nicer (or we could duplicate a bunch of the move semantics // emulation code from Ownership.h). FullExprArg(const FullExprArg& Other) : E(Other.E) {} ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).release()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.release()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, MultiStmtArg Elts, bool isStmtExpr); /// \brief A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, 
FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, sema::CapturedRegionScopeInfo::CapturedRegionKind Kind); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(bool IsInstantiation = false); RecordDecl 
*CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc); const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); NamedDecl *LookupInlineAsmIdentifier(StringRef &LineBuf, SourceLocation Loc, InlineAsmIdentifierInfo &Info); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, SourceLocation EndLoc); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, 
Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, MultiStmtArg Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); StmtResult ActOnSEHFinallyBlock(SourceLocation Loc, Stmt *Block); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. 
void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void EmitDeprecationWarning(NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty); void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=0); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, Expr **Args, unsigned NumArgs); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = 0, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for 
marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. 
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = 0); /// \brief Figure out if an expression could be turned into a call. bool isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = 0); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = 0, ArrayRef<Expr *> Args = ArrayRef<Expr *>()); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = 0); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = 0, NamedDecl *FoundD = 0); ExprResult BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, Expr *baseObjectExpr = 0, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand); ExprResult 
BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = 0); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr*> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = 0); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks, Scope *UDLScope = 0); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, MultiTypeArg ArgTypes, MultiExprArg ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, TypeSourceInfo **Types, Expr **Exprs, unsigned NumAssocs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. 
when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; bool HasTrailingLParen; }; ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = 0); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, Decl *ObjCImpDecl, bool HasTemplateArgs); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl, bool HasTrailingLParen); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = 0, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, Expr *Config = 0, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. 
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool isTypeName, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, AttributeList *AttrList, bool IsInstantiation, bool IsTypeName, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool IsTypeName, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 CXXConstructorDecl *Constructor,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  // Tracks which exception types have already been recorded, so Exceptions
  // stays duplicate-free.
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Reset to an empty dynamic exception set (used when widening the spec).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept; start from throw() instead.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
    EPI.ExceptionSpecType = getExceptionSpecType();
    if (EPI.ExceptionSpecType == EST_Dynamic) {
      EPI.NumExceptions = size();
      EPI.Exceptions = data();
    } else if (EPI.ExceptionSpecType == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      EPI.ExceptionSpecType = EST_ComputedNoexcept;
      EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).take();
    }
  }

  /// \brief Convenience overload: build a fresh ExtProtoInfo carrying the
  /// computed exception specification.
  FunctionProtoType::ExtProtoInfo getEPI() const {
    FunctionProtoType::ExtProtoInfo EPI;
    getEPI(EPI);
    return EPI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);

/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// \brief Check the given exception-specification and update the
/// extended prototype information with the results.
void checkExceptionSpecification(ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExtProtoInfo &EPI);

/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                               bool Diagnose = false);

/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                   CXXRecordDecl *ClassDecl);

/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                      CXXConstructorDecl *Constructor);

/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
                              CXXDestructorDecl *Destructor);

/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
                                   CXXDestructorDecl *Destructor);

/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);

/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
/// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. 
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc,
                             Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc,
                             Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             TypeSourceInfo *Ty,
                             Expr *E,
                             SourceRange AngleBrackets,
                             SourceRange Parens);

// Overloads for typeid on a type operand vs. an expression operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr, SourceLocation RParenLoc);

// Overloads for Microsoft __uuidof on a type operand vs. an expression.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr, SourceLocation RParenLoc);

//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of S.CXXThisTypeOverride, restored on destruction.
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);

/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
                                bool IsThrownVarInScope);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Expr *ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer,
                       bool TypeMayContainAuto = true);

// Checks that the type being new'ed is complete, non-abstract, etc.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

// Lookup of the operator new/operator delete pair used by a new-expression.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             bool UseGlobal, QualType AllocType,
                             bool IsArray, Expr **PlaceArgs,
                             unsigned NumPlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
                            DeclarationName Name, Expr** Args,
                            unsigned NumArgs, DeclContext *Ctx,
                            bool AllowMissing, FunctionDecl *&Operator,
                            bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name,
                                     QualType Return, QualType Argument,
                                     bool addMallocAttr = false);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name,
                              FunctionDecl* &Operator,
                              bool Diagnose = true);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                          bool UseGlobal, bool ArrayForm,
                          Expr *Operand);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  bool ConvertToBoolean);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
                               SourceLocation KWLoc,
                               ParsedType Ty,
                               SourceLocation RParen);

ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *T,
                               SourceLocation RParen);

/// ActOnBinaryTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait OTT,
                                SourceLocation KWLoc,
                                ParsedType LhsTy,
                                ParsedType RhsTy,
                                SourceLocation RParen);

ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT,
                                SourceLocation KWLoc,
                                TypeSourceInfo *LhsT,
                                TypeSourceInfo *RhsT,
                                SourceLocation RParen);

/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               ParsedType LhsTy,
                               Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo,
                               Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S,
                                        Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);

ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType,
                                     bool HasTrailingLParen);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName,
                                     bool HasTrailingLParen);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS,
                                     bool HasTrailingLParen);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

// Convenience overload: forwards to the main ActOnFinishFullExpr, using the
// expression's own location (or an invalid location for a null expression).
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
                                        : SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue = false,
                               bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
bool isUnknownSpecialization(const CXXScopeSpec &SS);

/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
                                  CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  SourceLocation IdLoc,
                                  IdentifierInfo &II,
                                  ParsedType ObjectType);

bool BuildCXXNestedNameSpecifier(Scope *S,
                                 IdentifierInfo &Identifier,
                                 SourceLocation IdentifierLoc,
                                 SourceLocation CCLoc,
                                 QualType ObjectType,
                                 bool EnteringContext,
                                 CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup);

/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
bool ActOnCXXNestedNameSpecifier(Scope *S,
                                 CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 TemplateTy TemplateName,
                                 SourceLocation TemplateNameLoc,
                                 SourceLocation LAngleLoc,
                                 ASTTemplateArgsPtr TemplateArgs,
                                 SourceLocation RAngleLoc,
                                 SourceLocation CCLoc,
                                 bool EnteringContext);

/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);

/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                          SourceRange AnnotationRange,
                                          CXXScopeSpec &SS);

bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Introduce the scope for a lambda expression. sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope, bool IsInstantiation = false); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. 
Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *" or "NSString *" depending on the type of /// ValueType, which is allowed to be a built-in numeric type or /// "char *" or "const char *". ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, 
SourceLocation ExternLoc, SourceLocation LangLoc, StringRef Lang, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = 0); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = 0); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool 
SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = ArrayRef<CXXCtorInitializer *>()); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); typedef LazyVector<CXXRecordDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDynamicClasses, 2, 2> DynamicClassesType; /// \brief A list of all of the dynamic classes in this translation /// unit. DynamicClassesType DynamicClasses; /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag = true); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType 
CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedExplicitlyDefaultedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = 0, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(Decl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList 
&TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template<typename T1> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireNonAbstractType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool 
hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter /// list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList * MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateParameterList **ParamLists, unsigned NumParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult 
ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, const TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. 
If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// /// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate /// when the template arguments contain a pack expansion that is being /// expanded into a fixed parameter list. /// /// \returns True if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool *ExpansionIntoFixedList = 0); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, const TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, const TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. 
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. 
/// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. 
enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. 
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                     UnexpandedParameterPackContext UPPC);

/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                          SourceLocation EllipsisLoc);

/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                   SourceLocation EllipsisLoc,
                                   Optional<unsigned> NumExpansions);

/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
                            SourceLocation EllipsisLoc,
                            Optional<unsigned> NumExpansions);

/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
                                     SourceRange PatternRange,
                             ArrayRef<UnexpandedParameterPack> Unexpanded,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                                     bool &ShouldExpand,
                                     bool &RetainExpansion,
                                     Optional<unsigned> &NumExpansions);

/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// \brief Template argument deduction was successful.
  TDK_Success = 0,
  /// \brief The declaration was invalid; do nothing.
  TDK_Invalid,
  /// \brief Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// \brief Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// \brief Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// \brief Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// \brief Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// \brief The arguments included an overloaded function name that could
  /// not be resolved to a suitable function.
  TDK_FailedOverloadResolution,
  /// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
SubstituteExplicitTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                              TemplateArgumentListInfo &ExplicitTemplateArgs,
                              SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                              SmallVectorImpl<QualType> &ParamTypes,
                              QualType *FunctionType,
                              sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType,
                  unsigned ArgIdx,
                  QualType OriginalArgType)
    : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
      OriginalArgType(OriginalArgType) { }

  QualType OriginalParamType;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
                              SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                              unsigned NumExplicitlySpecified,
                              FunctionDecl *&Specialization,
                              sema::TemplateDeductionInfo &Info,
           SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = 0);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        ArrayRef<Expr *> Args,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
                                TypeSourceInfo *&Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);

FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
                                                 FunctionTemplateDecl *FT2,
                                                 SourceLocation Loc,
                                           TemplatePartialOrderingContext TPOC,
                                                 unsigned NumCallArguments);
UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin,
                                         UnresolvedSetIterator SEnd,
                                         TemplatePartialOrderingContext TPOC,
                                         unsigned NumCallArguments,
                                         SourceLocation Loc,
                                         const PartialDiagnostic &NoneDiag,
                                         const PartialDiagnostic &AmbigDiag,
                                        const PartialDiagnostic &CandidateDiag,
                                         bool Complain = true,
                                         QualType TargetType = QualType());

ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
                                  ClassTemplatePartialSpecializationDecl *PS1,
                                  ClassTemplatePartialSpecializationDecl *PS2,
                                  SourceLocation Loc);

void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced,
                                unsigned Depth,
                                llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
                  const FunctionTemplateDecl *FunctionTemplate,
                  llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                  const FunctionTemplateDecl *FunctionTemplate,
                  llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D,
                                     const TemplateArgumentList *Innermost = 0,
                                                bool RelativeToPrimary = false,
                                               const FunctionDecl *Pattern = 0);

/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
  /// \brief The kind of template instantiation we are performing
  enum InstantiationKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template, and
    /// TemplateArgs/NumTemplateArguments provides the template
    /// arguments as specified.
    /// FIXME: Use a TemplateArgumentList
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a ClassTemplatePartialSpecializationDecl or
    /// a FunctionTemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation
  } Kind;

  /// \brief The point of instantiation within the source code.
  SourceLocation PointOfInstantiation;

  /// \brief The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// \brief The entity that is being instantiated.
  Decl *Entity;

  /// \brief The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  /// \brief The number of template arguments in TemplateArgs.
  unsigned NumTemplateArgs;

  /// \brief The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// \brief The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  ActiveTemplateInstantiation()
    : Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0),
      NumTemplateArgs(0), DeductionInfo(0) {}

  /// \brief Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;

  friend bool operator==(const ActiveTemplateInstantiation &X,
                         const ActiveTemplateInstantiation &Y) {
    if (X.Kind != Y.Kind)
      return false;

    if (X.Entity != Y.Entity)
      return false;

    switch (X.Kind) {
    case TemplateInstantiation:
    case ExceptionSpecInstantiation:
      return true;

    case PriorTemplateArgumentSubstitution:
    case DefaultTemplateArgumentChecking:
      if (X.Template != Y.Template)
        return false;

      // Fall through

    case DefaultTemplateArgumentInstantiation:
    case ExplicitTemplateArgumentSubstitution:
    case DeducedTemplateArgumentSubstitution:
    case DefaultFunctionArgumentInstantiation:
      return X.TemplateArgs == Y.TemplateArgs;
    }

    llvm_unreachable("Invalid InstantiationKind!");
  }

  friend bool operator!=(const ActiveTemplateInstantiation &X,
                         const ActiveTemplateInstantiation &Y) {
    return !(X == Y);
  }
};

/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
  ActiveTemplateInstantiations;

/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;

/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
    : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): the RAII class above is named ArgumentPackSubstitutionIndexRAII,
// but this friend declaration names a different (otherwise undeclared) class --
// verify the name is intentional.
friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// \brief Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        ActiveTemplateInstantiation::InstantiationKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  // NOTE(review): undocumented in the original -- given the ParmVarDecl
  // entity this appears to record default-function-argument instantiation;
  // confirm against the definition before relying on it.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting prior template arguments into a
  /// non-type or template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// \brief Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  operator bool() const { return Invalid; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool SavedInNonInstantiationSFINAEContext;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};

void PrintInstantiationStack();

/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().Context == Sema::Unevaluated;
}

/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
    : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
      PrevInNonInstantiationSFINAEContext(
                                      SemaRef.InNonInstantiationSFINAEContext),
      PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
  {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
  UnqualifiedTyposCorrectedMap;

/// \brief A cache containing the results of typo correction for unqualified
/// name lookup.
///
/// The string is the string that we corrected to (which may be empty, if
/// there was no correction), while the boolean will be true when the
/// string represents a keyword.
UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
/// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = 0); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. 
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

ExprResult SubstInitializer(Expr *E,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            bool CXXDirectInit);

bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                         CXXRecordDecl *Pattern,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

bool InstantiateClass(SourceLocation PointOfInstantiation,
                      CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      TemplateSpecializationKind TSK,
                      bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
    : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = 0,
                      LocalInstantiationScope *OuterMostScope = 0);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK,
                           bool Complain = true);

void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
                           SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
                                     SourceLocation PointOfInstantiation,
                                     VarDecl *Var,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, unsigned NumElts); 
/// Called on an \@protocol forward declaration naming \p NumElts protocols.
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                        const IdentifierLocPair *IdentList,
                                               unsigned NumElts,
                                               AttributeList *attrList);

/// Look up the protocols named in \p ProtocolId and append the
/// declarations found to \p Protocols.
void FindProtocolDeclaration(bool WarnOnDeclarations,
                             const IdentifierLocPair *ProtocolId,
                             unsigned NumProtocols,
                             SmallVectorImpl<Decl *> &Protocols);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
///        in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
                         ObjCContainerDecl *CD,
                         ObjCPropertyDecl *redeclaredProperty = 0,
                         ObjCContainerDecl *lexicalDC = 0);

/// Diagnose a mismatch between property \p Property and the superclass
/// property \p SuperProperty it redeclares.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name);

/// Diagnose methods in the class extension \p CAT that duplicate methods
/// in the interface \p ID.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

/// Match the properties of one protocol \p PDecl against those declared
/// in the class or category \p CDecl.
void MatchOneProtocolPropertiesInClass(Decl *CDecl,
                                       ObjCProtocolDecl *PDecl);

/// Called on an \@end, with all the methods, properties, and
/// translation-unit variables collected for the container.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 Decl **allMethods = 0, unsigned allNum = 0,
                 Decl **allProperties = 0, unsigned pNum = 0,
                 DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);

/// Called on an \@property declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    bool *OverridingProperty,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = 0);

/// Called on a property implementation declaration
/// (\@synthesize or \@dynamic; \p ImplKind selects which).
Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc);

enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Parser-supplied information about one argument of an Objective-C
/// method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  AttributeList *ArgAttrs;
};

/// Called on a complete Objective-C method declaration.
Decl *ActOnMethodDeclaration(
  Scope *S,
  SourceLocation BeginLoc, // location of the + or -.
  SourceLocation EndLoc,   // location of the ; or {.
  tok::TokenKind MethodType,
  ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
  ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
  // optional arguments. The number of types/arguments is obtained
  // from the Sel.getNumArgs().
  ObjCArgInfo *ArgInfo,
  DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
  AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
  bool isVariadic, bool MethodDefinition);

/// Look up a method with the given selector in the protocol qualifiers
/// of the pointer type \p OPT.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);

/// Look up a method with the given selector in the object type \p Ty.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

/// Check an Objective-C method declaration under ARC rules.
bool CheckARCMethodDecl(ObjCMethodDecl *method);

/// Infer an ARC lifetime qualifier for the given declaration.
bool inferObjCARCLifetime(ValueDecl *decl);

/// Build a property reference expression on the given base expression.
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                          Expr *BaseExpr,
                          SourceLocation OpLoc,
                          DeclarationName MemberName,
                          SourceLocation MemberLoc,
                          SourceLocation SuperLoc, QualType SuperType,
                          bool Super);

/// Called on a class property reference of the form
/// 'receiverName.propertyName'.
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                          IdentifierInfo &propertyName,
                          SourceLocation receiverNameLoc,
                          SourceLocation propertyNameLoc);

/// Try to capture an implicit reference to 'self'.
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// \brief The message is sent to 'super'.
  ObjCSuperMessage,
  /// \brief The message is an instance message.
  ObjCInstanceMessage,
  /// \brief The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S,
                                   IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

/// Called on a message send whose receiver is 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

/// Build a class message send.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel,
                             ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args,
                             bool isImplicit = false);

/// Build an implicit class message send (one with no source syntax).
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc,
                                     Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

/// Called on a message send whose receiver is a type.
ExprResult ActOnClassMessage(Scope *S,
                             ParsedType Receiver,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

/// Build an instance message send.
ExprResult BuildInstanceMessage(Expr *Receiver,
                                QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args,
                                bool isImplicit = false);

/// Build an implicit instance message send (one with no source syntax).
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc,
                                        Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

/// Called on a message send whose receiver is an expression.
ExprResult ActOnInstanceMessage(Scope *S,
                                Expr *Receiver,
                                Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args);

/// Build an ARC bridged cast (__bridge, __bridge_transfer,
/// __bridge_retained).
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo,
                                Expr *SubExpr);

/// Called on an ARC bridged cast as written in source.
ExprResult ActOnObjCBridgedCast(Scope *S,
                                SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type,
                                SourceLocation RParenLoc,
                                Expr *SubExpr);

/// Check the rules for Objective-C 'init' methods.
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
                     IdentifierInfo *Name,
                     Expr *Alignment,
                     SourceLocation PragmaLoc,
                     SourceLocation LParenLoc,
                     SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                       Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

/// Clone the declaration \p ND under the alias name \p II for
/// \#pragma weak.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);

/// Apply the weak-import information \p W to the declaration \p ND.
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

// OpenMP directives and clauses.

/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     Scope *CurScope,
                                     ArrayRef<DeclarationNameInfo> IdList);
/// \brief Build a new OpenMPThreadPrivateDecl and check its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<DeclRefExpr *> VarList);

/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = 0,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc,
                            FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstProtoArg,
                            Expr **Args, unsigned NumArgs,
                            SmallVector<Expr *, 8> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

/// Checks to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic and returning NULL if not.
bool variadicArgumentPODCheck(const Expr *E, VariadicCallType CT);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = 0);

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind); // CheckSingleAssignmentConstraints - Currently used by // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking, // this routine performs the default function/array converions. AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true); // \brief If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
/// Diagnose an invalid operand combination and return a null QualType.
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);

QualType CheckPointerToMemberOperands( // C++ 5.5
  ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
  SourceLocation OpLoc, bool isIndirect);

QualType CheckMultiplyDivideOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
  bool IsDivide);

QualType CheckRemainderOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);

QualType CheckAdditionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
  QualType* CompLHSTy = 0);

QualType CheckSubtractionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  QualType* CompLHSTy = 0);

QualType CheckShiftOperands( // C99 6.5.7
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
  bool IsCompAssign = false);

QualType CheckCompareOperands( // C99 6.5.8/9
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
  bool isRelational);

QualType CheckBitwiseOperands( // C99 6.5.[10...12]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);

QualType CheckLogicalOperands( // C99 6.5.[13,14]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);

// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
  Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);

/// Check increment/decrement of a pseudo-object expression.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);

/// Check an assignment whose left-hand side is a pseudo-object expression.
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);

/// Check the use of a pseudo-object expression as an rvalue.
ExprResult checkPseudoObjectRValue(Expr *E);

/// Recreate the syntactic form of the given pseudo-object expression.
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
  ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
  ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool *NonStandardCompositeType = 0);

/// Convenience overload of the above that operates on ExprResults,
/// rebuilding them around the (possibly converted) operand expressions.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = 0) {
  Expr *E1Tmp = E1.take(), *E2Tmp = E2.take();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                NonStandardCompositeType);
  E1 = Owned(E1Tmp);
  E2 = Owned(E2Tmp);
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. 
// returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds. ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(QualType ReceiverType,
                               Expr **Args, unsigned NumArgs, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
                                  ObjCMethodDecl *Method,
                                  bool isClassMessage, bool isSuperMessage);

/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);

ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = 0);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc,
                          IdentifierInfo *FieldName, QualType FieldTy,
                          Expr *BitWidth, bool *ZeroWidth = 0);

/// The CUDA target of a function, as determined by its attributes.
enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(CUDAFunctionTarget CallerTarget,
                     CUDAFunctionTarget CalleeTarget);

/// Convenience overload that identifies the targets of both functions
/// before checking.
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) {
  return CheckCUDATarget(IdentifyCUDATarget(Caller),
                         IdentifyCUDATarget(Callee));
}

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
                          bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(Decl *Constructor,
                                        CXXCtorInitializer** Initializers,
                                        unsigned NumInitializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);
void
CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, IdentifierInfo **SelIdents, unsigned NumSelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = 0); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents, unsigned NumSelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType 
ReturnType, IdentifierInfo **SelIdents, unsigned NumSelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=0, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, Expr **Args, unsigned NumArgs); bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args, unsigned NumProtoArgs, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinObjectSize(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); enum StringLiteralCheckType { SLCT_NotALiteral, SLCT_UncheckedLiteral, SLCT_CheckedLiteral }; StringLiteralCheckType checkFormatStringExpr(const Expr *E, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, bool inFunctionCall = true); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType); bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range); void CheckNonNullArguments(const NonNullAttr *NonNull, const Expr * const *ExprArgs, SourceLocation CallSiteLoc); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType, SourceLocation 
ReturnLoc); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. OwningPtr<llvm::DenseMap<TypeTagMagicValue, TypeTagData> > TypeTagForDatatypeMagicValues; /// \brief Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTWriter; public: /// \brief Retrieve the parser's current scope. 
/// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } IdentifierInfo *getSuperIdentifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = 0, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; } // end namespace clang #endif
spmv_tile_balance.h
#ifndef SPMV_TILE_BALANCE #define SPMV_TILE_BALANCE #include"common.h" // #include"mmio_highlevel.h" //#include"mmio.h" #include"utils.h" #include"tilespmv_warp_bal.h" void tilespmv_balance(Beidou_Tile_Matrix *matrix, int rowblkblock, MAT_VAL_TYPE *x, MAT_VAL_TYPE *y_bal, int *flag_tilerow_start, int *flag_tilerow_stop, MAT_VAL_TYPE *Ysum, MAT_VAL_TYPE *Ypartialsum) { int *rowpointer=matrix->rowpointer; int *columnidx = matrix->columnidx; MAT_VAL_TYPE *value = matrix->value; int m = matrix->m; int n = matrix->n; int tilem = matrix->tilem; int tilen = matrix->tilen; MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr; int numtile = matrix->numtile; int *tile_columnidx = matrix->tile_columnidx; int *tile_nnz = matrix->tile_nnz; char *Format = matrix->Format; int *blknnz = matrix->blknnz; char *blkwidth = matrix->blkwidth; MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val; unsigned char *Tile_csr_Col = matrix->Tile_csr_Col; unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr; MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val; unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx; unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx; MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val; unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx; MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val; unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx; unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx; MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val; MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val; char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx; MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val; char *Tile_dnscol_idx = matrix->Tile_dnscol_idx; int *denserowptr = matrix->denserowptr; int *densecolptr = matrix->densecolptr; unsigned int *flag_bal_tile_rowidx = matrix->flag_bal_tile_rowidx; int *tile_bal_rowidx_colstart = matrix->tile_bal_rowidx_colstart ; int *tile_bal_rowidx_colstop = matrix->tile_bal_rowidx_colstop; unsigned char *csr_ptr = 
matrix->csr_ptr; int *hyb_coocount = matrix->hyb_coocount; int *csr_offset = matrix->csr_offset; int *csrptr_offset = matrix->csrptr_offset; int *coo_offset = matrix->coo_offset; int *ell_offset = matrix->ell_offset; int *hyb_offset = matrix->hyb_offset; int *dns_offset = matrix->dns_offset; int *dnsrow_offset = matrix->dnsrow_offset; int *dnscol_offset = matrix->dnscol_offset; int nthreads = omp_get_max_threads(); MAT_VAL_TYPE *y_temp_g = (MAT_VAL_TYPE *)malloc(sizeof(MAT_VAL_TYPE) * BLOCK_SIZE * nthreads); memset(y_temp_g, 0, sizeof(MAT_VAL_TYPE) * BLOCK_SIZE * nthreads); int *flag_lastgroup_rowidx = (int *)malloc(nthreads * sizeof(int)); memset(flag_lastgroup_rowidx, 0, nthreads * sizeof(int)); // int *flag_tilerow_start = (int *)malloc(nthreads * sizeof(int)); // memset(flag_tilerow_start, 0, nthreads * sizeof(int)); // int *flag_tilerow_end = (int *)malloc(nthreads * sizeof(int)); // memset(flag_tilerow_end, 0, nthreads * sizeof(int)); // printf("balance rowblkblock = %i\n",rowblkblock); // int rowblk_ave = rowblkblock / nthreads; // int rowblk_ave_rest = rowblkblock % nthreads ; // // printf("tile group_ave = %i\n",rowblk_ave); // #pragma omp parallel for // for (int i =0; i < nthreads; i ++) // { // if (i < rowblk_ave_rest) // { // flag_tilerow_start[i] = i * (rowblk_ave +1); // flag_tilerow_stop[i] = (i +1) * (rowblk_ave +1); // } // else{ // flag_tilerow_start[i] = (rowblk_ave +1) * rowblk_ave_rest + (i - rowblk_ave_rest) * rowblk_ave; // flag_tilerow_stop[i] = (rowblk_ave +1) * rowblk_ave_rest + (i +1 - rowblk_ave_rest) * rowblk_ave; // } // } // } //printf("aaaaaaa0\n"); #pragma omp parallel for for (int ti =0; ti < nthreads; ti ++) { int start_groupid = flag_tilerow_start[ti]; int end_groupid = flag_tilerow_stop[ti]; int thread_id = omp_get_thread_num(); // if (ti ==nthreads -1) // { // printf("thread %i, start = %i, stop = %i\n",thread_id, start_groupid,end_groupid ); // } //printf("aaaaaaa1\n"); MAT_VAL_TYPE *y_local = (MAT_VAL_TYPE *)malloc 
(BLOCK_SIZE * sizeof(MAT_VAL_TYPE)); memset(y_local, 0, BLOCK_SIZE * sizeof(MAT_VAL_TYPE)); //printf("start_groupid=%d end_groupid=%d\n",start_groupid,end_groupid); for (int blki = start_groupid; blki < end_groupid ; blki ++) { int tile_rowidx_current = flag_bal_tile_rowidx[blki]; int tile_rowidx_next = blki == rowblkblock -1 ? -1: flag_bal_tile_rowidx[blki +1]; int rowlen= tile_rowidx_current==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; // if (blki == end_groupid -1) // { // printf("tile_rowidx_current = %i,tile_rowidx_next = %i, rowlen= %i\n", tile_rowidx_current, tile_rowidx_next, rowlen); // } //printf("aaaaaaa2\n"); for (int blkj = tile_bal_rowidx_colstart[blki]; blkj < tile_bal_rowidx_colstop[blki]; blkj ++) { int collen = tile_columnidx[blkj] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ; int tilennz = tile_nnz[blkj +1] - tile_nnz[blkj]; char format = Format[blkj]; int x_offset = tile_columnidx[blkj] * BLOCK_SIZE; //printf("format=%d\n",format); switch (format) { case 0: { warplevel_csr_bal(matrix, tile_rowidx_current, blkj, csr_offset, csrptr_offset, x, y_local, x_offset); break; } case 1: { // warplevel_coo_bal(matrix, tile_rowidx_current, blkj, coo_offset, // x, y_local, x_offset); break; } case 2: { warplevel_ell_bal(matrix, tile_rowidx_current, blkj, ell_offset, x, y_local, x_offset); break; } case 3: { warplevel_hyb_bal(matrix, tile_rowidx_current, blkj,hyb_coocount, hyb_offset, x, y_local, x_offset); break; } case 4: { warplevel_dns_bal(matrix, tile_rowidx_current, blkj, dns_offset, x, y_local, x_offset); break; } case 5: { warplevel_dnsrow_bal(matrix, tile_rowidx_current, blkj, dnsrow_offset, x, y_local, x_offset); break; } case 6: { warplevel_dnscol_bal(matrix, tile_rowidx_current, blkj, dnscol_offset, x, y_local, x_offset); break; } default: break; } } //printf("aaaaaaa2\n"); if (blki == end_groupid - 1) { if(tile_rowidx_current != tile_rowidx_next ) { for (int ri =0; ri < rowlen; ri ++) { y_bal[tile_rowidx_current * BLOCK_SIZE +ri] = 
y_local[ri]; } } else { flag_lastgroup_rowidx[thread_id] = tile_rowidx_current; for (int ri =0; ri < rowlen; ri ++) { y_temp_g[thread_id * BLOCK_SIZE + ri] = y_local[ri]; } memset(y_local, 0, BLOCK_SIZE * sizeof(MAT_VAL_TYPE)); } } else { if(tile_rowidx_current != tile_rowidx_next) { for (int ri =0; ri < rowlen; ri ++) { y_bal[tile_rowidx_current * BLOCK_SIZE +ri] = y_local[ri]; } memset(y_local, 0, BLOCK_SIZE * sizeof(MAT_VAL_TYPE)); } } } } //printf("aaaaaaa3\n"); for (int ti =0; ti < nthreads; ti ++) { int rowidx_temp = flag_lastgroup_rowidx[ti]; int rowlen = rowidx_temp == tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ; for (int ri =0; ri < rowlen; ri ++) { y_bal[rowidx_temp * BLOCK_SIZE + ri] += y_temp_g[ti * BLOCK_SIZE +ri]; } } //printf("aaaaaaa4\n"); #pragma omp parallel for for (int tid = 0; tid < nthreads; tid++) { if (matrix->Yid[tid] == -1 ) { for (int u = matrix->csrSplitter_yid[tid]; u < matrix->csrSplitter_yid[tid+1]; u++) { //printf("u=%d\n",u); int rowidx = matrix->coo_new_rowidx[u]; double sum = 0; for (int j = matrix->coo_new_matrix_ptr[u]; j < matrix->coo_new_matrix_ptr[u + 1]; j++) { int csrcolidx = matrix->coo_new_matrix_colidx[j]; sum += matrix->coo_new_matrix_value[j] * x[csrcolidx]; } y_bal[rowidx] += sum; } } else if (matrix->label[tid] != 0) { for (int u = matrix->Start1[tid]; u < matrix->End1[tid]; u++) { int rowidx = matrix->coo_new_rowidx[u]; double sum = 0; for (int j = matrix->coo_new_matrix_ptr[u]; j < matrix->coo_new_matrix_ptr[u + 1]; j++) { int csrcolidx = matrix->coo_new_matrix_colidx[j]; sum += matrix->coo_new_matrix_value[j] * x[csrcolidx]; } y_bal[rowidx] += sum; } } else if (matrix->Yid[tid] != -1 && matrix->label[tid] == 0)//youwenti { Ysum[tid] = 0; Ypartialsum[tid] = 0; for (int j = matrix->Start2[tid]; j < matrix->End2[tid]; j++) { int csrcolidx = matrix->coo_new_matrix_colidx[j]; Ypartialsum[tid] += matrix->coo_new_matrix_value[j] * x[csrcolidx]; } Ysum[tid] += Ypartialsum[tid]; y_bal[matrix->Yid[tid]] += Ysum[tid]; } } 
} #endif
omp_strsm_batch.c
/** * @file omp_strsm_batch.c * * @brief BBLAS omp_strsm_batch float routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_ztrsm_batch.c normal z -> s, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define REAL /** Purpose ------- <b>strsm_batch</b> is an OpenMP version of strsm_batch. It solves for X in one of the matrix equations op( arrayA[i] )*X = alpha*arrayB[i], or X*op( arrayA[i] ) = alpha[i]*arrayB[i], where op( X ) is one of - op( X ) = X or - op( X ) = X**T or - op( X ) = X**H, alpha[i] is a scalar, X and B are M[i] by N[i] matrices, and arrayA[i] is a unit or non-unit, upper or lower triangular matrix. The solution matrix X overwrites arrayB[i] on exit. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], transA[0], diag[0], M[0], N[0], alpha[0], lda[0], and ldb[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. Each element side[i] specifies whether op( arrayA[i] ) appears on the left or right side of the operation as follows: - = 'BblasLeft' op( arrayA[i] )*X = alpha[i]*arrayB[i]. 
- = 'BblasRight' X*op( arrayA[i] ) = alpha[i]*arrayB[i]. @param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the matrix arrayA[i] is upper or lower triangular as follows: - = 'BblasUpper' arrayA[i] is an upper triangular matrix. - = 'BblasLower' arrayA[i] is a lower triangular matrix. @param[in] transA Array of <tt>enum BBLAS_TRANS</tt>. On entry, trans[i] specifies the form of op( arrayA[i] ) to be used in the operation as follows: - = 'BblasNoTrans' op( arrayA[i] ) = arrayA[i]. - = 'BblasTrans' op( arrayA[i] ) = arrayA[i]**T. - = 'BblasConjTrans' op( arrayA[i] ) = arrayA'[i]**H. @param[in] diag - Array of <tt>enum BBLAS_DIAG</tt>. On entry, diag[i] specifies whether or not arrayA[i] is unit triangular as follows: - = 'BblasUnit' arrayA[i] is assumed to be unit triangular. - = 'BblasNonUnit' arrayA[i] is not assumed to be unit triangular. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayB[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayB[i]. N[i] must be greater than zero. @param[in] alpha Array of REAL When alpha[i] is set to zero arrayA[i] is not referenced and arrayB[i] need not be set before entry. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a REAL matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the triangular matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the matrix whilst the strictly upper triangular part is not used. 
        When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i]
        must contain the triangular matrix: when uplo[i] = BblasUpper, the
        upper triangular part of arrayA[i] must contain the matrix whilst the
        strictly lower triangular part is not used; similarly when
        uplo[i] = BblasLower, the lower triangular part of arrayA[i] must
        contain the matrix whilst the strictly upper triangular part is not
        used.
        Note that when diag[i] = BblasUnit the diagonal elements of arrayA[i]
        are not used either, they are assumed to be equal to one.

  @param[in] lda
          Array of <tt>int</tt>.
          On entry, lda[i] specifies the first dimension of arrayA[i] as
          declared in the calling (sub) program. When side[i] = BblasLeft
          then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must
          be at least max( 1, N[i] ).

  @param[in,out] arrayB
          Array of pointers.
          Each element arrayB[i] is a pointer to a REAL matrix of dimension
          ldb[i] by N[i].
          The leading M[i] by N[i] part of arrayB[i] must contain the matrix
          elements. On exit, arrayB[i] is overwritten by the solution
          matrix X.

  @param[in] ldb
          Array of <tt>int</tt>.
          Each element ldb[i] specifies the first dimension of arrayB[i] as
          declared in the calling (sub) program. Each element ldb[i] must be
          at least max( 1, M[i] ).

  @param[in] batch_count
          <tt>int</tt>
          The number of matrices to operate on.

  @param[in] batch_opts
          <tt>enum BBLAS_OPTS</tt>
          One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
          batch operation required.

  @param[out] info
          Array of <tt>int</tt>.
          Each element info[i] is the error return code of the ith strsm in
          the batch, these need not be set on entry. The error codes can be
          found in bblas_macros.h.
**/
// OpenMP batched triangular solve (see the Doxygen block above for the full
// contract). Fixes vs. the original:
//  - in the BBLAS_VARIABLE branch the ldb check reported BBLAS_ERR_LDC
//    (copy-paste from a gemm routine); it now reports BBLAS_ERR_LDB;
//  - the BBLAS_VARIABLE branch now validates diag[i], matching the
//    BBLAS_FIXED branch, so an invalid diag is reported instead of being
//    passed straight to cblas_strsm.
void omp_strsm_batch(
    const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
    const enum BBLAS_TRANS *transA, const enum BBLAS_DIAG *diag,
    const int *M, const int *N,
    const float *alpha, const float **arrayA, const int *lda,
    float **arrayB, const int *ldb,
    const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "strsm_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        /* Report only: with a negative count every loop below is empty
         * (original behavior preserved). */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* All problems share the parameters stored at index 0; validate
         * them once, then dispatch the whole batch in parallel. */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((transA[first_index] != BblasNoTrans) &&
            (transA[first_index] != BblasTrans) &&
            (transA[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANSA;
            }
            return;
        }
        if ((diag[first_index] != BblasNonUnit) &&
            (diag[first_index] != BblasUnit))
        {
            xerbla_batch(func_name, BBLAS_ERR_DIAG, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_DIAG;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* A is M-by-M when applied from the left, N-by-N from the right. */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < max(1, LDA))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        /* Particular case: empty matrices, nothing to do. */
        if (min(M[first_index], N[first_index]) == 0)
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        #pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_strsm */
            cblas_strsm(
                BblasColMajor,
                side[first_index], uplo[first_index],
                transA[first_index], diag[first_index],
                M[first_index], N[first_index],
                alpha[first_index],
                arrayA[batch_iter], lda[first_index],
                arrayB[batch_iter], ldb[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Each problem carries its own parameters; validate and solve each
         * independently so one bad problem does not abort the batch. */
        #pragma omp parallel for private(batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((transA[batch_iter] != BblasNoTrans) &&
                (transA[batch_iter] != BblasTrans) &&
                (transA[batch_iter] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANSA;
                continue;
            }
            /* diag check was missing in the original variable-size branch. */
            if ((diag[batch_iter] != BblasNonUnit) &&
                (diag[batch_iter] != BblasUnit))
            {
                xerbla_batch(func_name, BBLAS_ERR_DIAG, batch_iter);
                info[batch_iter] = BBLAS_ERR_DIAG;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            /* Original reported BBLAS_ERR_LDC here (copy-paste bug). */
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            /* Particular case: empty matrices, nothing to do. */
            if (min(M[batch_iter], N[batch_iter]) == 0)
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_strsm(
                BblasColMajor,
                side[batch_iter], uplo[batch_iter],
                transA[batch_iter], diag[batch_iter],
                M[batch_iter], N[batch_iter],
                alpha[batch_iter],
                arrayA[batch_iter], lda[batch_iter],
                arrayB[batch_iter], ldb[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    } else
    {
        /* batch_opts is neither fixed nor variable */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
GB_assign_zombie5.c
//------------------------------------------------------------------------------
// GB_assign_zombie5: delete entries in C for C_replace_phase
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// For GrB_Matrix_assign, C(I,J)<M,repl>=..., if C_replace is true, and mask M
// is present, then any entry C(i,j) outside IxJ must be be deleted, if
// M(i,j)=0.

// See also GB_assign_zombie3 and GB_assign_zombie4.

// C must be sparse or hypersparse.

// Entries are not removed here; they are only flagged as zombies (row index
// flipped via GB_FLIP) and counted in C->nzombies, to be pruned later.

#include "GB_assign.h"
#include "GB_assign_zombie.h"
#include "GB_subassign_methods.h"
#include "GB_ek_slice.h"

// workspace cleanup used by the return path
#undef  GB_FREE_ALL
#define GB_FREE_ALL                         \
{                                           \
    GB_WERK_POP (C_ek_slicing, int64_t) ;   \
}

GrB_Info GB_assign_zombie5
(
    GrB_Matrix C,                   // the matrix C, or a copy
    const GrB_Matrix M,             // mask; any sparsity structure
    const bool Mask_comp,           // true if mask is complemented (!M)
    const bool Mask_struct,         // true: use structure of M, ignore values
    const GrB_Index *I,             // row index list of the C(I,J) submatrix
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list of the C(I,J) submatrix
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_JUMBLED (M)) ;      // binary search on M
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------

    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    // const int64_t Cnvec = C->nvec ;
    int64_t *restrict Ci = C->i ;
    int64_t nzombies = C->nzombies ;    // local copy; written back at the end
    const int64_t zvlen = C->vlen ;

    //--------------------------------------------------------------------------
    // get M
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int8_t  *restrict Mb = M->b ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: only the pattern of M matters then
    const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t Mnvec = M->nvec ;
    const int64_t Mvlen = M->vlen ;
    const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    const bool M_is_full = GB_IS_FULL (M) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // slice the entries for each task
    //--------------------------------------------------------------------------

    // GB_SLICE_MATRIX defines pstart_Cslice, kfirst_Cslice, klast_Cslice,
    // and sets C_ntasks and C_nthreads.
    int C_ntasks, C_nthreads ;
    GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
    GB_SLICE_MATRIX (C, 64, chunk) ;

    //--------------------------------------------------------------------------
    // each task creates its own zombies
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (tid = 0 ; tid < C_ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task description
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_Cslice [tid] ;
        int64_t klast  = klast_Cslice  [tid] ;

        //----------------------------------------------------------------------
        // scan vectors kfirst to klast for entries to delete
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get C(:,j) and determine if j is outside the list J
            //------------------------------------------------------------------

            int64_t j = GBH (Ch, k) ;
            // j_outside is true if column j is outside the C(I,J) submatrix
            bool j_outside = !GB_ij_is_in_list (J, nJ, j, Jkind, Jcolon) ;
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, tid, k,
                kfirst, klast, pstart_Cslice, Cp, zvlen) ;

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            // this works for M with any sparsity structure
            int64_t pM_start, pM_end ;
            int64_t pright = Mnvec - 1 ;
            int64_t pleft = 0 ;
            GB_lookup (M_is_hyper, Mh, Mp, Mvlen, &pleft, pright, j,
                &pM_start, &pM_end) ;
            // mjdense: M(:,j) has an entry in every row, so the lookup of
            // M(i,j) below can index directly instead of binary searching
            bool mjdense = (pM_end - pM_start) == Mvlen ;

            //------------------------------------------------------------------
            // iterate over all entries in C(:,j)
            //------------------------------------------------------------------

            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {

                //--------------------------------------------------------------
                // consider C(i,j)
                //--------------------------------------------------------------

                // C(i,j) is outside the C(I,J) submatrix if either i is
                // not in the list I, or j is not in J, or both.
                int64_t i = Ci [pC] ;
                if (!GB_IS_ZOMBIE (i) &&
                    (j_outside || !GB_ij_is_in_list (I, nI, i, Ikind, Icolon)))
                {

                    //----------------------------------------------------------
                    // C(i,j) is a live entry not in the C(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    // This macro defines mij = (bool) M(i,j).
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (i) ;
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete C(i,j) by marking it as a zombie
                        nzombies++ ;
                        Ci [pC] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    C->nzombies = nzombies ;
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
7969.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(1) { #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 16) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: Y is modified in place while normalizing the carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized if fewer than
   * 3 (resp. 4) arguments are passed — the benchmark assumes a full
   * command line "Nx Ny Nz Nt". +2 accounts for the 1-deep halo. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[2][Nz][Ny][Nx]: double buffer of the stencil grid
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[7][Nz][Ny][Nx]: one coefficient array per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 32;
  tile_size[4] = -1;   // sentinel terminating the list

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // NOTE(review): loops start at 1, so the index-0 plane/row/column of A
  // and coef is never initialized; the stencil below reads A at i-1 with
  // i starting from t5+1, which can touch those cells — inherited from
  // the original benchmark; confirm before relying on numeric output.
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* The following comment blocks were inlined from glibc headers by the
     * source-to-source (PLUTO/CLooG) preprocessing step; kept verbatim. */
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.

       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.

       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.

       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it.  */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default.  */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0.  */
    /* We do not support C11 <threads.h>.  */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* Auto-generated time-tiled loop nest: t5 is time, t6/t7/t8 sweep
       z/y/x within a tile; the t2 (tile) loop is parallelized. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(16*t1+Nx+29,32)),floord(32*t2+Nx+28,32)),floord(32*t3+Nx+28,32)),floord(32*t1-32*t2+Nz+Nx+27,32));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),32*t4+30),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point stencil: center plus the 6 face neighbours,
                         each scaled by its own coefficient array. */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // (outer A/coef pointers and tile_size are not freed — inherited quirk)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
task_types.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
#include <math.h>

// OMPT task-type test: creates initial, implicit, explicit, undeferred,
// untied, final, and nested final+undeferred tasks, and verifies the
// task_type flags reported by the OMPT callbacks via the FileCheck
// directives at the bottom of main. `x` only exists to give each task a
// side effect the compiler cannot remove.
int main() {
  // initialize the OpenMP runtime
  omp_get_num_threads();

  // initial task
  print_ids(0);

  int x;
  // implicit task
#pragma omp parallel num_threads(1)
  {
    print_ids(0);
    x++;
  }

#pragma omp parallel num_threads(2)
  {
    // explicit task
#pragma omp single
#pragma omp task
    {
      print_ids(0);
      x++;
    }

    // explicit task with undeferred
#pragma omp single
#pragma omp task if (0)
    {
      print_ids(0);
      x++;
    }

    // explicit task with untied
#pragma omp single
#pragma omp task untied
    {
      // Output of thread_id is needed to know on which thread task is executed
      printf("%" PRIu64 ": explicit_untied\n", ompt_get_thread_data()->value);
      print_ids(0);
      print_frame(1);
      x++;
#pragma omp taskyield
      printf("%" PRIu64 ": explicit_untied(2)\n", ompt_get_thread_data()->value);
      print_ids(0);
      print_frame(1);
      x++;
#pragma omp taskwait
      printf("%" PRIu64 ": explicit_untied(3)\n", ompt_get_thread_data()->value);
      print_ids(0);
      print_frame(1);
      x++;
    }

    // explicit task with final
#pragma omp single
#pragma omp task final(1)
    {
      print_ids(0);
      x++;
      // nested explicit task with final and undeferred
#pragma omp task
      {
        print_ids(0);
        x++;
      }
    }

    // Mergeable task test deactivated for now
    // explicit task with mergeable
    /*
    #pragma omp task mergeable if((int)sin(0))
    {
      print_ids(0);
      x++;
    }
    */
    // TODO: merged task
  }

  // FileCheck directives follow; their text and order are functional and
  // must not be edited.

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id={{[0-9]+}}
  // CHECK-SAME: task_id=[[INITIAL_TASK_ID:[0-9]+]], actual_parallelism=1, index=1, flags=1
  // CHECK-NOT: 0: parallel_data initially not null

  // initial task
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
  // CHECK-SAME: task_id=[[INITIAL_TASK_ID]], exit_frame=[[NULL]]
  // CHECK-SAME: reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_initial=1, thread_num=0

  // implicit task
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id={{[0-9]+}}
  // CHECK-SAME: task_id={{[0-9]+}}, exit_frame={{0x[0-f]+}}
  // CHECK-SAME: reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_implicit|ompt_task_undeferred=134217730
  // CHECK-SAME: thread_num=0

  // explicit task
  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
  // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // CHECK-SAME: new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra={{0x[0-f]+}}
  // CHECK-SAME: task_type=ompt_task_explicit=4
  // CHECK-SAME: has_dependences=no
  // CHECK: [[THREAD_ID_1:[0-9]+]]: ompt_event_task_schedule:
  // CHECK-SAME: second_task_id=[[EXPLICIT_TASK_ID]]
  // CHECK: [[THREAD_ID_1]]: task level 0: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK-SAME: task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}
  // CHECK-SAME: reenter_frame=[[NULL]], task_type=ompt_task_explicit=4
  // CHECK-SAME: thread_num={{[01]}}

  // explicit task with undeferred
  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
  // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // CHECK-SAME: new_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra={{0x[0-f]+}}
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
  // CHECK-SAME: has_dependences=no
  // CHECK: [[THREAD_ID_2:[0-9]+]]: ompt_event_task_schedule:
  // CHECK-SAME: second_task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
  // CHECK: [[THREAD_ID_2]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[EXPLICIT_UNDEFERRED_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred=134217732
  // CHECK-SAME: thread_num={{[01]}}

  // explicit task with untied
  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
  // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // CHECK-SAME: new_task_id=[[EXPLICIT_UNTIED_TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra={{0x[0-f]+}}
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
  // CHECK-SAME: has_dependences=no

  // Here the thread_id cannot be taken from a schedule event as there
  // may be multiple of those
  // CHECK: [[THREAD_ID_3:[0-9]+]]: explicit_untied
  // CHECK: [[THREAD_ID_3]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
  // CHECK-SAME: thread_num={{[01]}}

  // after taskyield
  // CHECK: [[THREAD_ID_3_2:[0-9]+]]: explicit_untied(2)
  // CHECK: [[THREAD_ID_3_2]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
  // CHECK-SAME: thread_num={{[01]}}

  // after taskwait
  // CHECK: [[THREAD_ID_3_3:[0-9]+]]: explicit_untied(3)
  // CHECK: [[THREAD_ID_3_3]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[EXPLICIT_UNTIED_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_untied=268435460
  // CHECK-SAME: thread_num={{[01]}}

  // explicit task with final
  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
  // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // CHECK-SAME: new_task_id=[[EXPLICIT_FINAL_TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra={{0x[0-f]+}}
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
  // CHECK-SAME: has_dependences=no
  // CHECK: [[THREAD_ID_4:[0-9]+]]: ompt_event_task_schedule:
  // CHECK-SAME: second_task_id=[[EXPLICIT_FINAL_TASK_ID]]
  // CHECK: [[THREAD_ID_4]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[EXPLICIT_FINAL_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_final=536870916
  // CHECK-SAME: thread_num={{[01]}}

  // nested explicit task with final and undeferred
  // CHECK: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parent_task_frame.exit={{0x[0-f]+}}
  // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // CHECK-SAME: new_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID:[0-9]+]]
  // CHECK-SAME: codeptr_ra={{0x[0-f]+}}
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
  // CHECK-SAME:|ompt_task_final=671088644
  // CHECK-SAME: has_dependences=no
  // CHECK: [[THREAD_ID_5:[0-9]+]]: ompt_event_task_schedule:
  // CHECK-SAME: second_task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
  // CHECK: [[THREAD_ID_5]]: task level 0: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[NESTED_FINAL_UNDEFERRED_TASK_ID]]
  // CHECK-SAME: exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred
  // CHECK-SAME:|ompt_task_final=671088644
  // CHECK-SAME: thread_num={{[01]}}

  return 0;
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }