source
stringlengths
3
92
c
stringlengths
26
2.25M
mandelbrot.c
/* * Author: Pablololo12 */ /* * Order to compile with gcc: * gcc -O3 -o mandelbrot -fopenmp mandelbrot.c * * To execute first set number of threads * export OMP_NUM_THREADS=4 */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> const int SIZEX=4500; //const int SIZEY=3000; //const int SIZEX=450; const int SIZEY=4500; const int max_iter=2000; int mandelbrot(double ix, double iy) { int iter = 0; double x = ix, y =iy; double x2 = x*x, y2 = y*y; while ((x2 + y2 < 4) && (iter < max_iter)) { y = 2*x*y + iy; x = x2 - y2 + ix; x2 = x*x; y2 = y*y; iter++; } return(iter); } int julia(double ix, double iy) { int iter=0; double x = ix, y =iy; double x2 = x*x, y2 = y*y; double c1=0.285; double c2=-0.01; while ((x2 + y2 < 4) && (iter < max_iter)) { y = 2*x*y + c2; x = x2 - y2 + c1; x2 = x*x; y2 = y*y; iter++; } return(iter); } int write_picture(int *img[SIZEY], int x, int y) { FILE * imagen; int i,d, r, g, b, val; double color; imagen = fopen("mandelbrot.ppm", "w"); fprintf(imagen, "P3 %d %d 255\n", x, y); for (i=0; i<y; i++) { for (d=0; d<x; d++) { color = (double) img[i][d]+1; color = log10(color); color = color/log10(max_iter); color = color*255; r = color; g = color; b = color; fprintf(imagen, "%d %d %d ", r,g,b); } } fclose(imagen); return 1; } int main() { int i, d, j, y; double xv, yv; int *matrix[SIZEY]; int valores[max_iter+1]; for (i = 0; i < SIZEY; i++) matrix[i] = (int *) malloc(SIZEX*sizeof(int)); int is_mandelbrot=0; if (is_mandelbrot) { #pragma omp parallel for for(y=0; y<SIZEY; y++) { for(int x=0; x<SIZEX;x++) { matrix[y][x] = mandelbrot(((3.0/SIZEX)*x)-2.0, ((2.0/SIZEY)*y)-1.0); //if (matrix[y][x]>255) matrix[y][x]=255; //matrix[y][x] = 255-matrix[y][x]; } } } else { #pragma omp parallel for for(y=0; y<SIZEY; y++) { for(int x=0; x<SIZEX;x++) { matrix[y][x] = julia(((2.0/SIZEX)*x)-1.0, ((3.0/SIZEY)*y)-1.5); } } } write_picture(matrix, SIZEX, SIZEY); }
GB_binop__bxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxor_int16 // A.*B function (eWiseMult): GB_AemultB__bxor_int16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxor_int16 // C+=b function (dense accum): GB_Cdense_accumb__bxor_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxor_int16 // C=scalar+B GB_bind1st__bxor_int16 // C=scalar+B' GB_bind1st_tran__bxor_int16 // C=A+scalar GB_bind2nd__bxor_int16 // C=A'+scalar GB_bind2nd_tran__bxor_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij 
= Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x) ^ (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxor_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxor_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bxor_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxor_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxor_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxor_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax 
[pA] ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB_bind1st_tran__bxor_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB_bind2nd_tran__bxor_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rootfinder_initial_guess_mex.c
#include "math.h" #include "mex.h" #include "matrix.h" #include "linequad.h" /* The gateway function */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Read input int n = mxGetNumberOfElements(prhs[0]); double* tj = mxGetPr(prhs[0]); double* xj = mxGetPr(prhs[1]); double* yj = mxGetPr(prhs[2]); double* zj = mxGetPr(prhs[3]); int N = mxGetNumberOfElements(prhs[4]); double* x0 = mxGetPr(prhs[4]); double* y0 = mxGetPr(prhs[5]); double* z0 = mxGetPr(prhs[6]); // Prepare output plhs[0] = mxCreateDoubleMatrix(N, 1, mxCOMPLEX); double* tinit_re_p = mxGetPr(plhs[0]); double* tinit_im_p = mxGetPi(plhs[0]); // Call function for all inputs #pragma omp parallel for schedule(guided) for (int i=0; i<N; i++) { double complex tinit = rootfinder_initial_guess(tj, xj, yj, zj, n, x0[i], y0[i], z0[i]); tinit_re_p[i] = creal(tinit); tinit_im_p[i] = cimag(tinit); } }
mc.c
/** * mc.c * Authors: Yizhao Gao <yizhaotsccsj@gmail.com> * Date: {08/01/2017} */ #include <stdio.h> #include <stdlib.h> #include <random> #include <omp.h> #include "scan.h" using namespace std; void simulateCases(double * intensity, int * simCases, int locCount, int casCount) { static std::random_device rd; static std::mt19937 rng(rd()); static std::discrete_distribution<int> d (intensity, intensity + locCount); for(int i = 0; i < locCount; i++) { simCases[i] = 0; } for(int i = 0; i < casCount; i++) { simCases[d(rng)] ++; } } int * monteCarlo(double * x, double * y, double * intensity, double * intenInW, int locCount, int casCount, double wSize, int wCount, int highLow, double * clusterLL, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) nExtreme[i] = 0; int * simCass; if(NULL == (simCass = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } int * simCasInW; double * simll; if(NULL == (simCasInW = (int *) malloc (locCount * wCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } if(NULL == (simll = (double *) malloc (locCount * wCount * sizeof(double)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } double simMaxLL; for(int i = 0; i < nSim; i++) { simulateCases(intensity, simCass, locCount, casCount); getWindowCOnly(x, y, simCass, locCount, wSize, wCount, simCasInW); loglikelihood(simll, simCasInW, intenInW, locCount * wCount, casCount, highLow); simMaxLL = -9999; int k = 0; for(; k < locCount * wCount; k++) { if(simll[k] > 0) { simMaxLL = simll[k]; k++; break; } } for(; k < locCount * wCount; k++) { if(simll[k] > 0 && simll[k] > simMaxLL) { simMaxLL = simll[k]; } } if(simMaxLL > 0) { for(int j = 0; j < 
nClusters; j++) { if(simMaxLL > clusterLL[j]) { nExtreme[j] ++; } } } } free(simCasInW); free(simll); free(simCass); return nExtreme; } int * monteCarloOld(double * x, double * y, double * intensity, int locCount, int casCount, int * clusterCase, int * centerID, double * cRadius, bool * highCluster, int nClusters, int nSim) { int * nExtreme; if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nClusters; i++) { nExtreme[i] = 0; } int * simCass; if(NULL == (simCass = (int *) malloc (locCount * sizeof(int)))) { printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__); exit(1); } for(int i = 0; i < nSim; i++) { simulateCases(intensity, simCass, locCount, casCount); #pragma omp parallel for for(int j = 0; j < nClusters; j++) { double xC = x[centerID[j]]; double yC = y[centerID[j]]; double rad2 = cRadius[j] * cRadius[j]; int simCasInc = 0; for(int k = 0; k < locCount; k++) { if((x[k] - xC) * (x[k] - xC) + (y[k] - yC) * (y[k] - yC) <= rad2) { simCasInc += simCass[k]; } } if(highCluster[j] && simCasInc >= clusterCase[j]) nExtreme[j] ++; else if(!highCluster[j] && simCasInc <= clusterCase[j]) nExtreme[j] ++; } } free(simCass); return nExtreme; }
GB_unop__identity_uint16_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fp64) // op(A') function: GB (_unop_tran__identity_uint16_fp64) // C type: uint16_t // A type: double // cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fp64) ( uint16_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
cpu_adagrad.h
#pragma once #include <cuda_fp16.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <cassert> #include "cuda.h" #include "custom_cuda_layers.h" #include "simd.h" #define STEP(SPAN) \ void Step_##SPAN(float* _params, \ float* grads, \ float* _exp_avg_sq, \ size_t _param_size, \ __half* dev_param = nullptr, \ bool half_precision = false); class Adagrad_Optimizer { public: Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) : _alpha(alpha), _eps(eps), _weight_decay(weight_decay), _buf_index(false) { cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); _streams[0] = Context::Instance().GetCurrentStream(); _streams[1] = Context::Instance().GetNewStream(); } ~Adagrad_Optimizer() { cudaFreeHost(_doubled_buffer[0]); cudaFreeHost(_doubled_buffer[1]); } #if defined(__AVX512__) or defined(__AVX256__) template <int span> void Step_AVX(size_t* rounded_size, float* _params, float* grads, float* _exp_avg_sq, size_t param_size, __half* dev_param = nullptr, bool half_precision = false); #endif STEP(1) STEP(4) STEP(8) inline void SynchronizeStreams() { for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); } inline void IncrementStep(size_t step) { _step++; if (_step != step) { _step = step; } } inline void update_state(float lr, float epsilon, float weight_decay) { _alpha = lr; _eps = epsilon; _weight_decay = weight_decay; } private: float _alpha; float _eps; float _weight_decay; float _betta1_t; float _betta2_t; size_t _step; float* _doubled_buffer[2]; bool _buf_index; cudaStream_t _streams[2]; }; #if defined(__AVX512__) or defined(__AVX256__) template <int span> void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, float* _params, float* grads, float* _exp_avg_sq, size_t _param_size, __half* dev_params, bool half_precision) { size_t new_rounded_size = 0; AVX_Data eps_4; eps_4.data = SIMD_SET(_eps); float step_size = -1 * _alpha; AVX_Data step_size_4; 
step_size_4.data = SIMD_SET(step_size); AVX_Data weight_decay4; if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay); new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); for (size_t t = 0; t < new_rounded_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { AVX_Data grad_4[span]; simd_load<span>(grad_4, grads + i, half_precision); AVX_Data momentum_4[span]; simd_load<span>(momentum_4, grads + i, false); AVX_Data variance_4[span]; simd_load<span>(variance_4, _exp_avg_sq + i, false); AVX_Data param_4[span]; simd_load<span>(param_4, _params + i, half_precision); if (_weight_decay > 0) { simd_fma<span>(grad_4, param_4, weight_decay4, grad_4); } simd_fma<span>(variance_4, grad_4, grad_4, variance_4); simd_sqrt<span>(grad_4, variance_4); simd_add<span>(grad_4, grad_4, eps_4); simd_div<span>(grad_4, momentum_4, grad_4); simd_fma<span>(param_4, grad_4, step_size_4, param_4); simd_store<span>(_params + i, param_4, half_precision); if (dev_params) { simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); } simd_store<span>(_exp_avg_sq + i, variance_4, false); } if (dev_params) { if (half_precision) launch_param_update_half( _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); else launch_param_update( _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); _buf_index = !_buf_index; } } *rounded_size = new_rounded_size; } #endif
laplace_par.h
#ifndef _LAPLACE_PAR_ #define _LAPLACE_PAR_ #include<omp.h> template<int SIZE> inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2]) { #pragma omp parallel for schedule(static) proc_bind(spread) for (int i = 0; i < SIZE + 2; i++) for (int j = 0; j < SIZE + 2; j++) { a[i][j] = 0.0; b[i][j] = 0.0; } } template<int SIZE> inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n) { if (n % 2 == 0) { #pragma omp parallel for schedule(static) proc_bind(spread) for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) b[i][j] = (a[i + 1][j] + a[i - 1][j] + a[i][j - 1] + a[i][j + 1]) / 4.0; } else { #pragma omp parallel for schedule(static) proc_bind(spread) for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) a[i][j] = (b[i + 1][j] + b[i - 1][j] + b[i][j - 1] + b[i][j + 1]) / 4.0; } } #endif // !_LAPLACE_PAR_
mmult.c
#include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #define NRA 512 /* number of rows in matrix A */ #define NCA 512 /* number of columns in matrix A */ #define NCB 512 /* number of columns in matrix B */ struct timeval startTime; struct timeval finishTime; double timeIntervalLength; __sw_global__ double **a; /* [NRA][NCA] */ __sw_global__ double **b; /* [NCA][NCB] */ __sw_global__ double **c; /* [NRA][NCB] */ __sw_global__ double sum; void* myMalloc(int size, int info) { void* t = (void*)malloc(size); if(!t) { printf("\nMemory allocation error [%d]",info); fflush(stdout); exit(0); } return t; } int main (int argc, char *argv[]) { __sw_global__ long i, j, k; sum = 0; a = (double**)myMalloc(NRA*sizeof(double*),1); for (i=0;i<NCA;i++) a[i]=(double*)myMalloc(NCA*sizeof(double),2); b = (double**)myMalloc(NCA*sizeof(double*),3); for (i=0;i<NCB;i++) b[i]=(double*)myMalloc(NCB*sizeof(double),4); c = (double**)myMalloc(NRA*sizeof(double*),5); for (i=0;i<NCB;i++) c[i]=(double*)myMalloc(NCB*sizeof(double),6); /*** Initialize matrices ***/ for(i = 0; i < NRA; i++) for(j = 0; j < NCA; j++) a[i][j] = i + j; for(i = 0; i < NCA; i++) for(j = 0; j < NCB; j++) b[i][j] = i * j; for(i = 0; i < NRA; i++) for(j = 0; j < NCB; j++) c[i][j] = 0; // Start timers gettimeofday(&startTime, NULL); #pragma omp parallel private (i, j ,k) { #pragma omp for schedule (static, 8) { for(i = 0; i < NRA; i++) for(j = 0; j < NCB; j++) for(k = 0; k < NCA; k++) c[i][j] += a[i][k] * b[k][j]; } } // End timers gettimeofday(&finishTime, NULL); //Calculate the interval length timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000 + (double)(finishTime.tv_usec-startTime.tv_usec); timeIntervalLength=timeIntervalLength/1000; //Print the interval lenght printf("__aid_Time: %g msec.\n", timeIntervalLength); /*** Print results ***/ for(i = 0; i < NRA; i++) for(j = 0; j < NCB; j++) sum += c[i][j]; printf("__aid_Result: %g\n\n", sum); return 0; }
GB_unop__identity_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_int8) // op(A') function: GB (_unop_tran__identity_int8_int8) // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_int8) ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE 
and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_int8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
reduction-task-1.c
int v; extern void foo (int); void bar (void) { int i; #pragma omp for reduction (task, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp sections reduction (task, +: v) { foo (-2); #pragma omp section foo (-3); } #pragma omp parallel reduction (task, +: v) foo (-1); #pragma omp parallel for reduction (task, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp parallel sections reduction (task, +: v) { foo (-2); #pragma omp section foo (-3); } #pragma omp teams distribute parallel for reduction (task, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp for reduction (default, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp sections reduction (default, +: v) { foo (-2); #pragma omp section foo (-3); } #pragma omp parallel reduction (default, +: v) foo (-1); #pragma omp parallel for reduction (default, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp parallel sections reduction (default, +: v) { foo (-2); #pragma omp section foo (-3); } #pragma omp teams distribute parallel for reduction (default, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp for reduction (default, +: v) nowait for (i = 0; i < 64; i++) foo (i); #pragma omp sections nowait reduction (default, +: v) { foo (-2); #pragma omp section foo (-3); } #pragma omp simd reduction (default, +: v) for (i = 0; i < 64; i++) v++; #pragma omp for simd reduction (default, +: v) for (i = 0; i < 64; i++) v++; #pragma omp parallel for simd reduction (default, +: v) for (i = 0; i < 64; i++) v++; #pragma omp teams distribute parallel for simd reduction (default, +: v) for (i = 0; i < 64; i++) v++; #pragma omp taskloop reduction (default, +: v) for (i = 0; i < 64; i++) foo (i); #pragma omp taskloop simd reduction (default, +: v) for (i = 0; i < 64; i++) v++; #pragma omp teams reduction (default, +: v) foo (i); #pragma omp teams distribute reduction (default, +: v) for (i = 0; i < 64; i++) foo (i); }
st_naive_mask.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> /* multiprocessor support */ //#include "../PaulSwissPNG/png/mypng.h" #include "mypng.h" /***********************************************************************/ double* SteepestTangent(const uint8 *Image, const size_t Nr, const size_t Nc) { /* This implements the basic Steepest Tangent Algorithm. Loops over all other pixels and remembers the greatest absolute slope. Paul @ 09 July 2018. Allocates memory for a double precision image, fills it with the Steepest Tangent and returns a pointer to it. */ /***********************************************************************/ double *SteepestTangentImage = (double *)calloc(Nr*Nc, sizeof(double)); if (!SteepestTangentImage) { fprintf(stderr, "SteepestTangent(): cannot allocate memory for return image.\n"); return NULL; } #pragma omp parallel for collapse(2) for (long r=0L; r<Nr; r++) { for (long c=0L; c<Nc; c++) { const long index = r*Nc + c; const double centerPixel = (double)Image[index]; double MaxSlopeSq = 0.0D; for (long r2=0L; r2<Nr; r2++) { for (long c2=0L; c2<Nc; c2++) { const double pixelDiff = (double)Image[r2*Nc + c2] - centerPixel; if (pixelDiff != 0.0D) { const double slopeSq = (pixelDiff * pixelDiff) / (double)((c2-c)*(c2-c) + (r2-r)*(r2-r)); if (slopeSq > MaxSlopeSq) MaxSlopeSq = slopeSq; } } /* c2 */ } /* r2 */ SteepestTangentImage[index] = sqrt(MaxSlopeSq); } /* c */ } /* r */ return SteepestTangentImage; } /* SteepestTangent() */
correlation.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(#P3) { #pragma omp for schedule(#P1, #P2) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp for schedule(#P1, #P2) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(#P1, #P2) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(#P1, #P2) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
matrix.c
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <string.h> #include <omp.h> #include "matrix.h" matrix_t *matrix_create(int rows, int cols) { matrix_t *ret = NULL; double *raw_data = NULL; double **data = NULL; int r; raw_data = (double *) malloc(sizeof(double) * rows * cols); data = (double **) malloc(sizeof(double*) * rows); for (r = 0; r < rows; r++) { data[r] = raw_data + r * cols; } ret = (matrix_t*) malloc(sizeof(matrix_t)); ret->rows = rows; ret->cols = cols; ret->data = data; return ret; } void matrix_destroy(matrix_t *m) { free((void*)(m->data[0])); free((void*)(m->data)); free((void*)(m)); return; } void matrix_randfill(matrix_t *m) { int r, c; for (r = 0; r < m->rows; r++) { for (c = 0; c < m->cols; c++) { m->data[r][c] = random(); } } } void matrix_fill(matrix_t *m, double val) { int r, c; for (r = 0; r < m->rows; r++) { for (c = 0; c < m->cols; c++) { m->data[r][c] = val; } } } matrix_t *matrix_sum(matrix_t *matrix_a, matrix_t *matrix_b) { int r, c; matrix_t* matrix_r = matrix_create(matrix_a->rows, matrix_a->cols); #pragma omp parallel for schedule(static) for (r = 0; r < matrix_a->rows; r++) { for (c = 0; c < matrix_a->cols; c++) { matrix_r->data[r][c] = matrix_a->data[r][c] + matrix_b->data[r][c]; } } return matrix_r; } matrix_t *matrix_multiply(matrix_t *matrix_a, matrix_t *matrix_b) { int r, c, i; matrix_t* matrix_r = matrix_create(matrix_a->rows, matrix_b->cols); #pragma omp parallel for schedule(static) for(r = 0; r < matrix_r->rows; r++) { for(c = 0; c < matrix_r->cols; c++) { matrix_r->data[r][c] = 0; for (i = 0; i < matrix_a->cols; i++) { matrix_r->data[r][c] += matrix_a->data[r][i] * matrix_b->data[i][c]; } } } return matrix_r; } void swap(double *a, double *b) { double aux = *a; *a = *b; *b = aux; } int partition(double *vector, int low, int high) { double pivot = vector[high]; int i = (low - 1); for (int j = low; j <= high - 1; j++) { if (vector[j] <= pivot) { i++; swap(&vector[i], &vector[j]); } } swap(&vector[i + 1], 
&vector[high]); return (i + 1); } void quick_sort(double *vector, int low, int high, int deep) { if (low < high) { int part = partition(vector, low, high); if (deep * 2 > omp_get_num_threads()) { quick_sort(vector, low, part - 1, deep + 1); quick_sort(vector, part + 1, high, deep + 1); return; } #pragma omp parallel { #pragma omp sections { #pragma omp section { quick_sort(vector, low, part - 1, deep++); } #pragma omp section { quick_sort(vector, part + 1, high, deep++); } } } } } matrix_t *matrix_sort(matrix_t *matrix) { int rows_final = matrix->rows; int cols_final = matrix->cols; matrix_t *resultado = matrix_create(rows_final, cols_final); memcpy(resultado->data[0], matrix->data[0], cols_final * rows_final * sizeof(double)); quick_sort(resultado->data[0], 0, rows_final * cols_final - 1, 0); return resultado; } void matrix_print(matrix_t *m) { int i, j; printf("\n--\n"); for (i = 0; i < m->rows; i++) { for (j = 0; j < m->cols; j++) { printf("%.1f ", m->data[i][j]); } printf("\n"); } printf("\n--\n"); fflush(stdout); }
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p+1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p*inch*9; const float* k1 = kernel + (p+1)*inch*9; for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr0n = outptr0 + outw; float* outptr1n = outptr1 + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0+3); float32x4_t _k06 = vld1q_f32(k0+6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1+3); float32x4_t _k16 = vld1q_f32(k1+6); #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _sum0n = vld1q_f32(outptr0n); float32x4_t _sum1n = vld1q_f32(outptr1n); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = 
vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r10, _k00, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r11, _k00, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r12, _k00, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r20, _k03, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r21, _k03, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r22, _k03, 2); _sum0n = vfmaq_laneq_f32(_sum0n, _r30, _k06, 0); _sum0n = vfmaq_laneq_f32(_sum0n, _r31, _k06, 1); _sum0n = vfmaq_laneq_f32(_sum0n, _r32, _k06, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r10, _k10, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r11, _k10, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r12, _k10, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r20, _k13, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r21, _k13, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r22, _k13, 2); _sum1n = vfmaq_laneq_f32(_sum1n, _r30, _k16, 0); _sum1n = vfmaq_laneq_f32(_sum1n, _r31, _k16, 1); _sum1n = vfmaq_laneq_f32(_sum1n, _r32, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); vst1q_f32(outptr0n, _sum0n); vst1q_f32(outptr1n, _sum1n); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr0 += 4; outptr1 += 4; outptr0n += 4; outptr1n += 4; } #else if (nn > 0) { asm volatile( "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q14, q15, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, 
[%2 :64] \n"// _sum1 "vmla.f32 q6, q8, %e18[0] \n" "vmla.f32 q7, q8, %e21[0] \n" "pld [%3, #128] \n" "vld1.f32 {d24-d25}, [%3] \n"// _sum0n "pld [%4, #128] \n" "vld1.f32 {d26-d27}, [%4] \n"// _sum1n "vmla.f32 q12, q14, %e20[0] \n" "vmla.f32 q13, q14, %e23[0] \n" "vext.32 q8, q8, q9, #2 \n" "vext.32 q9, q14, q15, #1 \n" "vmla.f32 q6, q10, %e18[1] \n" "vmla.f32 q7, q10, %e21[1] \n" "vmla.f32 q12, q11, %f20[0] \n" "vmla.f32 q13, q11, %f23[0] \n" "pld [%6, #192] \n" "vld1.f32 {d28-d30}, [%6] \n"// r1 "add %6, #16 \n" "vmla.f32 q6, q8, %f18[0] \n" "vmla.f32 q7, q8, %f21[0] \n" "vmla.f32 q12, q9, %e20[1] \n" "vmla.f32 q13, q9, %e23[1] \n" "vext.32 q10, q14, q15, #1 \n" "vmla.f32 q6, q14, %e19[0] \n" "vmla.f32 q7, q14, %e22[0] \n" "vmla.f32 q12, q14, %e18[0] \n" "vmla.f32 q13, q14, %e21[0] \n" "vext.32 q11, q14, q15, #2 \n" "vmla.f32 q6, q10, %e19[1] \n" "vmla.f32 q7, q10, %e22[1] \n" "vmla.f32 q12, q10, %e18[1] \n" "vmla.f32 q13, q10, %e21[1] \n" "pld [%7, #192] \n" "vld1.f32 {d16-d18}, [%7 :64] \n"// r2 "add %7, #16 \n" "vmla.f32 q6, q11, %f19[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q12, q11, %f18[0] \n" "vmla.f32 q13, q11, %f21[0] \n" "vext.32 q10, q8, q9, #1 \n" "vmla.f32 q6, q8, %e20[0] \n" "vmla.f32 q7, q8, %e23[0] \n" "vmla.f32 q12, q8, %e19[0] \n" "vmla.f32 q13, q8, %e22[0] \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e20[1] \n" "vmla.f32 q7, q10, %e23[1] \n" "vmla.f32 q12, q10, %e19[1] \n" "vmla.f32 q13, q10, %e22[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n"// r0 "add %5, #16 \n" "vmla.f32 q6, q11, %f20[0] \n" "vmla.f32 q7, q11, %f23[0] \n" "vmla.f32 q12, q11, %f19[0] \n" "vmla.f32 q13, q11, %f22[0] \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n"// r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vst1.f32 {d12-d13}, [%1 : 64]!\n" "vst1.f32 {d14-d15}, [%2 : 64]!\n" "vext.32 q11, q14, q15, #2 \n" "vst1.f32 {d24-d25}, [%3]! \n" "vst1.f32 {d26-d27}, [%4]! 
\n" "subs %0, #1 \n" "bne 0b \n" "sub %5, #16 \n" "sub %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); float32x4_t _sum0n = vmulq_f32(_r10, _k00); float32x4_t _sum1n = vmulq_f32(_r10, _k10); _sum0n = vmlaq_f32(_sum0n, _r20, _k03); _sum1n = vmlaq_f32(_sum1n, _r20, _k13); _sum0n = vmlaq_f32(_sum0n, _r30, _k06); _sum1n = vmlaq_f32(_sum1n, _r30, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3); _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); *outptr0n = vaddvq_f32(_sum0n); *outptr1n = vaddvq_f32(_sum1n); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n)); float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 
= vget_lane_f32(_ss01, 1); *outptr0n = vget_lane_f32(_ss01n, 0); *outptr1n = vget_lane_f32(_ss01n, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum0n = 0.f; float sum1 = 0.f; float sum1n = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; sum0n += r1[0] * k0[0]; sum0n += r1[1] * k0[1]; sum0n += r1[2] * k0[2]; sum0n += r2[0] * k0[3]; sum0n += r2[1] * k0[4]; sum0n += r2[2] * k0[5]; sum0n += r3[0] * k0[6]; sum0n += r3[1] * k0[7]; sum0n += r3[2] * k0[8]; sum1n += r1[0] * k1[0]; sum1n += r1[1] * k1[1]; sum1n += r1[2] * k1[2]; sum1n += r2[0] * k1[3]; sum1n += r2[1] * k1[4]; sum1n += r2[2] * k1[5]; sum1n += r3[0] * k1[6]; sum1n += r3[1] * k1[7]; sum1n += r3[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; *outptr0n += sum0n; *outptr1n += sum1n; #endif // __ARM_NEON r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr0n++; outptr1n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; outptr0n += outw; outptr1n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = 
vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2); _sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0); _sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1); _sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; outptr1 += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n"// r0 "add %3, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n"// _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n"// _sum1 "vmul.f32 q14, q8, %e12[0] \n" "vmul.f32 q15, q8, %e15[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e12[1] \n" "vmla.f32 q7, q10, %e15[1] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q14, q11, %f12[0] \n" "vmla.f32 q15, q11, %f15[0] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q14, q10, %e13[1] \n" "vmla.f32 q15, q10, %e16[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5] \n"// r2 "add %5, #16 \n" "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "vmla.f32 q14, q8, %e14[0] \n" "vmla.f32 q15, q8, %e17[0] \n" 
"vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e14[1] \n" "vmla.f32 q7, q10, %e17[1] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmla.f32 q15, q11, %f17[0] \n" "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vst1.f32 {d12-d13}, [%1]! \n" "vst1.f32 {d14-d15}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * 
k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0++; r1++; r2++; outptr0++; outptr1++; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9; k1 += 9; } } #pragma omp parallel for for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p*inch*9; for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k3456 = vld1q_f32(kernel0+3); float32x4_t _k6789 = vld1q_f32(kernel0+6); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _sum3 = vld1q_f32(outptr2); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r30n = vld1q_f32(r3 + 4); float32x4_t _r31 = vextq_f32(_r30, _r30n, 1); float32x4_t _r32 = vextq_f32(_r30, _r30n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, 
_r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r10, _k0123, 0); float32x4_t _sum4 = vmulq_laneq_f32(_r11, _k0123, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r12, _k0123, 2); _sum4 = vfmaq_laneq_f32(_sum4, _r20, _k3456, 0); _sum3 = vfmaq_laneq_f32(_sum3, _r21, _k3456, 1); _sum4 = vfmaq_laneq_f32(_sum4, _r22, _k3456, 2); _sum3 = vfmaq_laneq_f32(_sum3, _r30, _k6789, 0); _sum4 = vfmaq_laneq_f32(_sum4, _r31, _k6789, 1); _sum3 = vfmaq_laneq_f32(_sum3, _r32, _k6789, 2); _sum1 = vaddq_f32(_sum1, _sum2); _sum3 = vaddq_f32(_sum3, _sum4); vst1q_f32(outptr, _sum1); vst1q_f32(outptr2, _sum3); r0 += 4; r1 += 4; r2 += 4; r3 += 4; outptr += 4; outptr2 += 4; } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1 :64] \n"// _sum "vmla.f32 q7, q9, %e14[0] \n" "vmul.f32 q6, q11, %e14[1] \n" "vmul.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n"// r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n"// _sum2 "vmla.f32 q8, q9, %e14[0] \n" "vmul.f32 q14, q11, %e14[1] \n" "vmul.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n"// r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" "vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n"// r3 "add %6, #16 \n" 
"vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n"// r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); float32x4_t _sum2 = vmulq_f32(_r10, _k0123); _sum2 = vmlaq_f32(_sum2, _r20, _k3456); _sum2 = vmlaq_f32(_sum2, _r30, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += 
r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum1 = vld1q_f32(outptr); float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r00n = vld1q_f32(r0 + 4); float32x4_t _r01 = vextq_f32(_r00, _r00n, 1); float32x4_t _r02 = vextq_f32(_r00, _r00n, 2); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r10n = vld1q_f32(r1 + 4); float32x4_t _r11 = vextq_f32(_r10, _r10n, 1); float32x4_t _r12 = vextq_f32(_r10, _r10n, 2); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r20n = vld1q_f32(r2 + 4); float32x4_t _r21 = vextq_f32(_r20, _r20n, 1); float32x4_t _r22 = vextq_f32(_r20, _r20n, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0); float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2); _sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0); _sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1); _sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2); _sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0); _sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1); _sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2); _sum1 = vaddq_f32(_sum1, _sum2); vst1q_f32(outptr, _sum1); r0 += 4; r1 += 4; r2 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n"// _sum "vmla.f32 q7, q8, %e10[0] \n" "vmul.f32 q13, q10, %e10[1] \n" "vmul.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, 
[%3] \n"// r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e11[1] \n" "vmla.f32 q14, q11, %f11[0] \n" "pld [%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n"// r2 "add %4, #16 \n" "vmla.f32 q7, q8, %e12[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q13, q10, %e12[1] \n" "vmla.f32 q14, q11, %f12[0] \n" "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n"// r0 "add %2, #16 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q7, q7, q14 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vst1.f32 {d14-d15}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } kernel0 += 9; } } } static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 
0.0f, 0.0f},
        // 8x3 kernel-transform matrix for the 8x8 (winograd64) domain:
        // applied twice below (rows, then columns) it lifts each 3x3
        // kernel g into an 8x8 transformed kernel U = G g G^T.
        {-2.0f/9, -2.0f/9, -2.0f/9},
        {-2.0f/9, 2.0f/9, -2.0f/9},
        {1.0f/90, 1.0f/45, 2.0f/45},
        {1.0f/90, -1.0f/45, 2.0f/45},
        {1.0f/45, 1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        { 0.0f, 0.0f, 1.0f}
    };

    // For every (output channel p, input channel q) pair, transform the
    // corresponding 3x3 kernel and store the 8x8 result in kernel_tm.
    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : horizontal pass — tmp[i] holds row i of (G applied to the
            // three kernel rows k0/k1/k2), one column per kernel row.
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : vertical pass — apply the transform on the other side and
            // write the finished 8x8 block row-major into kernel_tm0.
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd4
    // interleave weights
    // Pack the transformed kernels of 4 output channels side by side so the
    // dot stage can stream them with sequential loads; channels left over
    // when outch % 4 != 0 go in the final (outch % 4 + 3) / 4 plane(s).
    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    Mat kernel_tm2(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);

    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        float* ktm2 = kernel_tm2.channel(pp);

        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);

        int q=0;

#if __ARM_NEON && __aarch64__
        // Interleave 4 input channels at a time; the asm below copies one
        // 4-float group per row per iteration of the r-loop.
        for (; q+3<inch; q+=4)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q+1);
            const float* k02 = kernel0_tm.row(q+2);
            const float* k03 = kernel0_tm.row(q+3);

            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q+1);
            const float* k12 = kernel1_tm.row(q+2);
            const float* k13 = kernel1_tm.row(q+3);

            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q+1);
            const float* k22 = kernel2_tm.row(q+2);
            const float* k23 = 
kernel2_tm.row(q+3); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q+1); const float* k32 = kernel3_tm.row(q+2); const float* k33 = kernel3_tm.row(q+3); for (int r=0; r<16; r++) { // split into two asm blocks for gcc reject over 30 oprands :( asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k02), // %3 "=r"(k03), // %4 "=r"(k10), // %5 "=r"(k11), // %6 "=r"(k12), // %7 "=r"(k13) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k02), "4"(k03), "5"(k10), "6"(k11), "7"(k12), "8"(k13) : "cc", "memory", "v0", "v1", "v2", "v3" ); asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "ld1 {v2.4s}, [%3], #16 \n" "ld1 {v3.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "ld1 {v2.4s}, [%7], #16 \n" "ld1 {v3.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(ktm2), // %0 "=r"(k20), // %1 "=r"(k21), // %2 "=r"(k22), // %3 "=r"(k23), // %4 "=r"(k30), // %5 "=r"(k31), // %6 "=r"(k32), // %7 "=r"(k33) // %8 : "0"(ktm2), "1"(k20), "2"(k21), "3"(k22), "4"(k23), "5"(k30), "6"(k31), "7"(k32), "8"(k33) : "cc", "memory", "v0", "v1", "v2", "v3" ); } } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); const float* k20 = kernel2_tm.row(q); const float* k21 = kernel2_tm.row(q+1); const float* k30 = kernel3_tm.row(q); const float* k31 = kernel3_tm.row(q+1); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" 
"ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%5], #16 \n" "ld1 {v1.4s}, [%6], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%7], #16 \n" "ld1 {v1.4s}, [%8], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vld1.f32 {d2-d3}, [%6 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%7 :128]! \n" "vld1.f32 {d2-d3}, [%8 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k01), // %2 "=r"(k10), // %3 "=r"(k11), // %4 "=r"(k20), // %5 "=r"(k21), // %6 "=r"(k30), // %7 "=r"(k31) // %8 : "0"(ktm2), "1"(k00), "2"(k01), "3"(k10), "4"(k11), "5"(k20), "6"(k21), "7"(k30), "8"(k31) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k01[m]; ktm2[8 +m] = k10[m]; ktm2[12+m] = k11[m]; ktm2[16+m] = k20[m]; ktm2[20+m] = k21[m]; ktm2[24+m] = k30[m]; ktm2[28+m] = k31[m]; } k00 += 4; k01 += 4; k10 += 4; k11 += 4; k20 += 4; k21 += 4; k30 += 4; k31 += 4; ktm2 += 32; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); const float* k20 = kernel2_tm.row(q); const float* k30 = kernel3_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "ld1 {v1.4s}, [%2], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" "ld1 {v0.4s}, [%3], #16 \n" "ld1 {v1.4s}, [%4], #16 \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "v0", "v1" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vld1.f32 {d2-d3}, [%2 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! \n" "vld1.f32 {d0-d1}, [%3 :128]! \n" "vld1.f32 {d2-d3}, [%4 :128]! \n" "vst1.f32 {d0-d3}, [%0 :128]! 
\n" : "=r"(ktm2), // %0 "=r"(k00), // %1 "=r"(k10), // %2 "=r"(k20), // %3 "=r"(k30) // %4 : "0"(ktm2), "1"(k00), "2"(k10), "3"(k20), "4"(k30) : "cc", "memory", "q0", "q1" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[0 +m] = k00[m]; ktm2[4 +m] = k10[m]; ktm2[8 +m] = k20[m]; ktm2[12+m] = k30[m]; } k00 += 4; k10 += 4; k20 += 4; k30 += 4; ktm2 += 16; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); const Mat kernel0_tm = kernel_tm.channel(p); int q = 0; for (; q<inch; q++) { const float* k00 = kernel0_tm.row(q); for (int r=0; r<16; r++) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%1], #16 \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "v0" ); #else asm volatile( "vld1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d0-d1}, [%0 :128]! \n" : "=r"(ktm2), // %0 "=r"(k00) // %1 : "0"(ktm2), "1"(k00) : "cc", "memory", "q0" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { ktm2[m] = k00[m]; } k00 += 4; ktm2 += 4; #endif // __ARM_NEON } } } kernel_tm = kernel_tm2; } #if 0//TODO remove old code sometime later static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 
0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm = img0_tm.row(i * w_tm/8 + j); // TODO neon optimize for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } for (int m=0; m<8; m++) { const float* tmp0 
= tmp[m];

                        // NOTE(review): this whole function sits inside an
                        // `#if 0 //TODO remove old code` region — legacy path.
                        // Vertical pass of the 8x8 input transform: the same
                        // butterfly as the horizontal pass, applied to each
                        // row of tmp, emitting 8 transformed values.
                        r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);

                        r0_tm[1] = tmp12a + tmp12b;
                        r0_tm[2] = tmp12a - tmp12b;

                        float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        r0_tm[3] = tmp34a + tmp34b;
                        r0_tm[4] = tmp34a - tmp34b;

                        float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        r0_tm[5] = tmp56a + tmp56b;
                        r0_tm[6] = tmp56a - tmp56b;

                        r0_tm += 8;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    // For each output channel, accumulate the element-wise product of every
    // input channel's 64-float transformed tile with the matching
    // transformed kernel; output channels are processed 4 at a time.
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch);

        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for
        for (int pp=0; pp<nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p+1);
            Mat out2_tm = top_blob_tm.channel(p+2);
            Mat out3_tm = top_blob_tm.channel(p+3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p+1);
            const Mat kernel2_tm = kernel_tm.channel(p+2);
            const Mat kernel3_tm = kernel_tm.channel(p+3);

            // Accumulators start at zero; every input channel adds into them.
            out0_tm.fill(0.f);
            out1_tm.fill(0.f);
            out2_tm.fill(0.f);
            out3_tm.fill(0.f);

            int q = 0;

            // 4 input channels per iteration.
            for (; q+3<inch; q+=4)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* r1 = bottom_blob_tm.channel(q+1);
                const float* r2 = bottom_blob_tm.channel(q+2);
                const float* r3 = bottom_blob_tm.channel(q+3);

                const float* k00 = kernel0_tm.row(q);
                const float* k10 = kernel1_tm.row(q);
                const float* k20 = kernel2_tm.row(q);
                const float* k30 = kernel3_tm.row(q);

                float* output0_tm = out0_tm;
                float* output1_tm = out1_tm;
                float* output2_tm = out2_tm;
                float* output3_tm = out3_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
#if __ARM_NEON
#if __aarch64__
                    for (int m=0; 
m+7<64; m+=8) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output2_tm = vld1q_f32(output2_tm); float32x4_t _output3_tm = vld1q_f32(output3_tm); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k00 = vld1q_f32(k00); k00 += 64; float32x4_t _k01 = vld1q_f32(k00); k00 += 64; float32x4_t _k02 = vld1q_f32(k00); k00 += 64; float32x4_t _k03 = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tm = vmlaq_f32(_output0_tm, _r2, _k02); _output0_tm = vmlaq_f32(_output0_tm, _r3, _k03); float32x4_t _k10 = vld1q_f32(k10); k10 += 64; float32x4_t _k11 = vld1q_f32(k10); k10 += 64; float32x4_t _k12 = vld1q_f32(k10); k10 += 64; float32x4_t _k13 = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tm = vmlaq_f32(_output1_tm, _r2, _k12); _output1_tm = vmlaq_f32(_output1_tm, _r3, _k13); float32x4_t _k20 = vld1q_f32(k20); k20 += 64; float32x4_t _k21 = vld1q_f32(k20); k20 += 64; float32x4_t _k22 = vld1q_f32(k20); k20 += 64; float32x4_t _k23 = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tm = vmlaq_f32(_output2_tm, _r0, _k20); _output2_tm = vmlaq_f32(_output2_tm, _r1, _k21); _output2_tm = vmlaq_f32(_output2_tm, _r2, _k22); _output2_tm = vmlaq_f32(_output2_tm, _r3, _k23); float32x4_t _k30 = vld1q_f32(k30); k30 += 64; float32x4_t _k31 = vld1q_f32(k30); k30 += 64; float32x4_t _k32 = vld1q_f32(k30); k30 += 64; float32x4_t _k33 = vld1q_f32(k30); k30 += 64; k30 -= 64*4; _output3_tm = vmlaq_f32(_output3_tm, _r0, _k30); _output3_tm = vmlaq_f32(_output3_tm, _r1, _k31); _output3_tm = vmlaq_f32(_output3_tm, _r2, _k32); _output3_tm = vmlaq_f32(_output3_tm, _r3, _k33); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output1_tm, _output1_tm); 
vst1q_f32(output2_tm, _output2_tm); vst1q_f32(output3_tm, _output3_tm); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm); float32x4_t _output2_tmn = vld1q_f32(output2_tm); float32x4_t _output3_tmn = vld1q_f32(output3_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k00n = vld1q_f32(k00); k00 += 64; float32x4_t _k01n = vld1q_f32(k00); k00 += 64; float32x4_t _k02n = vld1q_f32(k00); k00 += 64; float32x4_t _k03n = vld1q_f32(k00); k00 += 64; k00 -= 64*4; _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n); float32x4_t _k10n = vld1q_f32(k10); k10 += 64; float32x4_t _k11n = vld1q_f32(k10); k10 += 64; float32x4_t _k12n = vld1q_f32(k10); k10 += 64; float32x4_t _k13n = vld1q_f32(k10); k10 += 64; k10 -= 64*4; _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); _output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n); _output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n); float32x4_t _k20n = vld1q_f32(k20); k20 += 64; float32x4_t _k21n = vld1q_f32(k20); k20 += 64; float32x4_t _k22n = vld1q_f32(k20); k20 += 64; float32x4_t _k23n = vld1q_f32(k20); k20 += 64; k20 -= 64*4; _output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n); _output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n); _output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n); _output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n); float32x4_t _k30n = vld1q_f32(k30); k30 += 64; float32x4_t _k31n = vld1q_f32(k30); k30 += 64; float32x4_t _k32n = vld1q_f32(k30); k30 += 64; float32x4_t _k33n = vld1q_f32(k30); k30 += 64; k30 -= 64*4; 
_output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n); _output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n); _output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n); _output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n); vst1q_f32(output0_tm, _output0_tmn); vst1q_f32(output1_tm, _output1_tmn); vst1q_f32(output2_tm, _output2_tmn); vst1q_f32(output3_tm, _output3_tmn); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k00 += 4; k10 += 4; k20 += 4; k30 += 4; } #else // __aarch64__ asm volatile( "mov r4, #8 \n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "0: \n" "pld [%4, #256] \n" "vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00 "add %8, %8, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10 "add %9, %9, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! 
\n"//q2 q3 = _r1 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01 "add %8, %8, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11 "add %9, %9, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02 "add %8, %8, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12 "add %9, %9, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%7, #256] \n" "vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3 "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03 "sub %8, %8, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%9, #256] \n" "vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13 "sub %9, %9, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%0 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20 "add %10, %10, #256 \n" "vmla.f32 q8, q0, q10 \n" "vmla.f32 q9, q1, q11 \n" "vst1.f32 {d24-d27}, [%1 :128]!\n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30 "add %11, %11, #256 \n" "vmla.f32 q12, q0, q14 \n" "vmla.f32 q13, q1, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21 "add %10, %10, #256 \n" "vmla.f32 q8, q2, q10 \n" "vmla.f32 q9, q3, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31 "add %11, %11, #256 \n" "vmla.f32 q12, q2, q14 \n" "vmla.f32 q13, q3, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22 "add %10, %10, #256 \n" "vmla.f32 q8, q4, q10 \n" "vmla.f32 q9, q5, q11 \n" "pld [%11, 
#256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32 "add %11, %11, #256 \n" "vmla.f32 q12, q4, q14 \n" "vmla.f32 q13, q5, q15 \n" "pld [%10, #256] \n" "vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23 "sub %10, %10, #736 \n" "vmla.f32 q8, q6, q10 \n" "vmla.f32 q9, q7, q11 \n" "pld [%11, #256] \n" "vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33 "sub %11, %11, #736 \n" "vmla.f32 q12, q6, q14 \n" "vmla.f32 q13, q7, q15 \n" "vst1.f32 {d16-d19}, [%2 :128]!\n" "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm "subs r4, r4, #1 \n" "vst1.f32 {d24-d27}, [%3 :128]!\n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(k00), // %8 "=r"(k10), // %9 "=r"(k20), // %10 "=r"(k30) // %11 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(k00), "9"(k10), "10"(k20), "11"(k30) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k00 -= 64; k10 -= 64; k20 -= 64; k30 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k00[m]; k00 += 64; output0_tm[m] += r1[m] * k00[m]; k00 += 64; output0_tm[m] += r2[m] * k00[m]; k00 += 64; output0_tm[m] += r3[m] * k00[m]; k00 += 64; k00 -= 64 * 4; output1_tm[m] += r0[m] * k10[m]; k10 += 64; output1_tm[m] += r1[m] * k10[m]; k10 += 64; output1_tm[m] += r2[m] * k10[m]; k10 += 64; output1_tm[m] += r3[m] * k10[m]; k10 += 64; k10 -= 64 * 4; output2_tm[m] += r0[m] * k20[m]; k20 += 64; output2_tm[m] += r1[m] * k20[m]; k20 += 64; output2_tm[m] += r2[m] * k20[m]; k20 += 64; output2_tm[m] += r3[m] * k20[m]; k20 += 64; k20 -= 64 * 4; output3_tm[m] += r0[m] * k30[m]; k30 += 64; output3_tm[m] += r1[m] * k30[m]; k30 += 64; output3_tm[m] += r2[m] * k30[m]; k30 += 64; output3_tm[m] += r3[m] * k30[m]; k30 += 64; k30 -= 64 * 4; } r0 += 64; r1 
+= 64;
                    r2 += 64;
                    r3 += 64;

                    output0_tm += 64;
                    output1_tm += 64;
                    output2_tm += 64;
                    output3_tm += 64;
#endif // __ARM_NEON
                }
            }

            // Remaining input channels (inch not a multiple of 4):
            // plain scalar multiply-accumulate over the 64-float tile.
            // NOTE(review): legacy `#if 0` path — kept byte-identical.
            for (; q<inch; q++)
            {
                const float* r0 = bottom_blob_tm.channel(q);

                const float* k0 = kernel0_tm.row(q);
                const float* k1 = kernel1_tm.row(q);
                const float* k2 = kernel2_tm.row(q);
                const float* k3 = kernel3_tm.row(q);

                float* output0_tm = out0_tm;
                float* output1_tm = out1_tm;
                float* output2_tm = out2_tm;
                float* output3_tm = out3_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
                    // TODO neon optimize
                    for (int m=0; m<64; m++)
                    {
                        output0_tm[m] += r0[m] * k0[m];
                        output1_tm[m] += r0[m] * k1[m];
                        output2_tm[m] += r0[m] * k2[m];
                        output3_tm[m] += r0[m] * k3[m];
                    }

                    r0 += 64;

                    output0_tm += 64;
                    output1_tm += 64;
                    output2_tm += 64;
                    output3_tm += 64;
                }
            }
        }

        // Tail output channels (outch not a multiple of 4), one at a time.
        #pragma omp parallel for
        for (int p=remain_outch_start; p<outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            out0_tm.fill(0.f);

            int q = 0;

            // 4 input channels per iteration for this single output channel.
            for (; q+3<inch; q+=4)
            {
                const float* r0 = bottom_blob_tm.channel(q);
                const float* r1 = bottom_blob_tm.channel(q+1);
                const float* r2 = bottom_blob_tm.channel(q+2);
                const float* r3 = bottom_blob_tm.channel(q+3);

                const float* k0 = kernel0_tm.row(q);
                const float* k1 = kernel0_tm.row(q+1);
                const float* k2 = kernel0_tm.row(q+2);
                const float* k3 = kernel0_tm.row(q+3);

                float* output0_tm = out0_tm;

                // tile
                for (int i=0; i<h_tm/8 * w_tm/8; i++)
                {
#if __ARM_NEON
#if __aarch64__
                    // Two 4-lane halves per loop body: 8 of the 64 tile floats.
                    for (int m=0; m+7<64; m+=8)
                    {
                        float32x4_t _output0_tm = vld1q_f32(output0_tm);

                        float32x4_t _r0 = vld1q_f32(r0);
                        float32x4_t _r1 = vld1q_f32(r1);
                        float32x4_t _r2 = vld1q_f32(r2);
                        float32x4_t _r3 = vld1q_f32(r3);

                        float32x4_t _k0 = vld1q_f32(k0);
                        float32x4_t _k1 = vld1q_f32(k1);
                        float32x4_t _k2 = vld1q_f32(k2);
                        float32x4_t _k3 = vld1q_f32(k3);

                        _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
                        _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
                        _output0_tm = vmlaq_f32(_output0_tm, _r2, _k2);
                        _output0_tm = vmlaq_f32(_output0_tm, _r3, _k3);

                        vst1q_f32(output0_tm, _output0_tm); 
output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; float32x4_t _output0_tmn = vld1q_f32(output0_tm); float32x4_t _r0n = vld1q_f32(r0); float32x4_t _r1n = vld1q_f32(r1); float32x4_t _r2n = vld1q_f32(r2); float32x4_t _r3n = vld1q_f32(r3); float32x4_t _k0n = vld1q_f32(k0); float32x4_t _k1n = vld1q_f32(k1); float32x4_t _k2n = vld1q_f32(k2); float32x4_t _k3n = vld1q_f32(k3); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); _output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n); _output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n); vst1q_f32(output0_tm, _output0_tmn); output0_tm += 4; r0 += 4; r1 += 4; r2 += 4; r3 += 4; k0 += 4; k1 += 4; k2 += 4; k3 += 4; } #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "mov r4, %0 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! 
\n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! 
\n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm "vmla.f32 q15, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q12, q0, q2 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q13, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! 
\n" "vmla.f32 q12, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q13, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q12, q8, q10 \n" "pld [%0, #256] \n" "vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm "vmla.f32 q13, q9, q11 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q14, q0, q2 \n" "vst1.f32 {d24-d27}, [r4 :128]!\n" "pld [%2, #256] \n" "vld1.f32 {d16-d19}, [%2 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6 :128]!\n" "vmla.f32 q14, q8, q10 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" "vmla.f32 q15, q9, q11 \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7 :128]! \n" "vmla.f32 q14, q0, q2 \n" "pld [%4, #256] \n" "vld1.f32 {d16-d19}, [%4 :128]!\n" "vmla.f32 q15, q1, q3 \n" "pld [%8, #256] \n" "vld1.f32 {d20-d23}, [%8 :128]!\n" "vmla.f32 q14, q8, q10 \n" "vmla.f32 q15, q9, q11 \n" "vst1.f32 {d28-d31}, [r4 :128]!\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(k0), // %5 "=r"(k1), // %6 "=r"(k2), // %7 "=r"(k3) // %8 : "0"(output0_tm), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(k0), "6"(k1), "7"(k2), "8"(k3) : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ k0 -= 64; k1 -= 64; k2 -= 64; k3 -= 64; #else for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; output0_tm[m] += r2[m] * k2[m]; output0_tm[m] += r3[m] * k3[m]; } r0 += 64; r1 += 64; r2 += 64; r3 += 64; output0_tm += 64; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; // tile for (int i=0; i<h_tm/8 * w_tm/8; i++) { // TODO neon optimize for (int m=0; m<64; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 64; output0_tm += 64; } } } } bottom_blob_tm = Mat(); // 
END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm = out0_tm.row(i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; // TODO neon optimize for (int m=0; m<8; m++) { float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm += 8; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + 
tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 
1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm01 = img0_tm.row(i * w_tm/8 + j); float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j); float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j); float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m/2] + (m%2) * 8; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - 
tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<4; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k0n = vld1q_f32(k0+4); float32x4_t _k0nn = vld1q_f32(k0+8); float32x4_t _k0nnn = vld1q_f32(k0+12); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k1n = vld1q_f32(k1+4); float32x4_t _k1nn = vld1q_f32(k1+8); float32x4_t _k1nnn = vld1q_f32(k1+12); #else float32x4_t _k0; float32x4_t _k0n; float32x4_t _k0nn; float32x4_t _k0nnn; float32x4_t _k1; float32x4_t _k1n; float32x4_t _k1nn; float32x4_t _k1nnn; asm volatile( "pld [%0, #512] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #512] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" "vld1.f32 {%e6-%f6}, [%0 :128]! \n" "vld1.f32 {%e8-%f8}, [%1 :128]! \n" "vld1.f32 {%e7-%f7}, [%0 :128]! \n" "vld1.f32 {%e9-%f9}, [%1 :128]! 
\n" : "=r"(k0), // %0 "=r"(k1), // %1 "=w"(_k0), // %2 "=w"(_k0n), // %3 "=w"(_k1), // %4 "=w"(_k1n), // %5 "=w"(_k0nn), // %6 "=w"(_k0nnn), // %7 "=w"(_k1nn), // %8 "=w"(_k1nnn) // %9 : "0"(k0), "1"(k1) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = 
vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = 
vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "mov r4, %1 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "0: \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! 
\n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d20-d23}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [r4 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "subs %0, #1 \n" "vst1.f32 {d20-d23}, [r4 :128]! 
\n" "bne 0b \n" "sub %1, #32 \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k0), // %8 "w"(_k0n), // %9 "w"(_k1), // %10 "w"(_k1n), // %11 "w"(_k0nn), // %12 "w"(_k0nnn), // %13 "w"(_k1nn), // %14 "w"(_k1nnn) // %15 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "mov r4, %0 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q9, q13, %q7 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! 
\n"// q12 q13 = _r0 "vmla.f32 q8, q14, %q8 \n" "pld [%0, #256] \n" "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm "vmla.f32 q9, q15, %q9 \n" "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "vst1.f32 {d16-d19}, [r4 :128] \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q10, q14, %q12 \n" "vmla.f32 q11, q15, %q13 \n" "vst1.f32 {d20-d23}, [%0 :128]! \n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k0), // %6 "w"(_k0n), // %7 "w"(_k1), // %8 "w"(_k1n), // %9 "w"(_k0nn), // %10 "w"(_k0nnn), // %11 "w"(_k1nn), // %12 "w"(_k1nnn) // %13 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<16; m++) { output0_tm[m] += r0[m] * k0[m]; output0_tm[m] += r1[m] * k1[m]; } r0 += 16; r1 += 16; output0_tm += 16; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k0 += 16; k1 += 16; #endif // __aarch64__ #else k0 += 16; k1 += 16; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k0 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<4; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k0n = vld1q_f32(k0+4); float32x4_t _k0nn = vld1q_f32(k0+8); float32x4_t _k0nnn = vld1q_f32(k0+12); #else float32x4_t _k0; float32x4_t _k0n; float32x4_t _k0nn; float32x4_t _k0nnn; asm volatile( "pld [%0, #512] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e4-%f4}, [%0 :128]! 
\n" : "=r"(k0), // %0 "=w"(_k0), // %1 "=w"(_k0n), // %2 "=w"(_k0nn), // %3 "=w"(_k0nnn) // %4 : "0"(k0) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "mov r4, %0 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm "vmla.f32 q10, q12, %q6 \n" "vst1.f32 {d16-d19}, [r4 :128] \n" "vmla.f32 q11, q13, %q7 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k0), // %4 "w"(_k0n), // %5 "w"(_k0nn), // %6 "w"(_k0nnn) // %7 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<16; m++) { output0_tm[m] += r0[m] * k0[m]; } r0 += 16; output0_tm += 16; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k0 += 16; #endif // __aarch64__ #else k0 += 16; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j); const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j); const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m/2] + (m%2) * 8; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, 
top_blob_bordered.w - top_blob.w); } static void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; 
i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + j * 6; float* r0_tm0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2); float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3); float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4); float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5); float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6); float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7); for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 }; for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; float* r0_tm = r0_tms[m]; r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]); r0_tm[1] = tmp12a + tmp12b; r0_tm[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); r0_tm[3] = tmp34a + tmp34b; r0_tm[4] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); r0_tm[5] = tmp56a + tmp56b; r0_tm[6] = tmp56a - tmp56b; } } } } } bottom_blob_bordered = 
Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); out0_tm.fill(0.f); out1_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); const float* k10 = kernel1_tm.row(q); const float* k11 = kernel1_tm.row(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); float32x4_t _k11 = vld1q_f32(k11); float32x4_t _k11n = vld1q_f32(k11+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; float32x4_t _k10; float32x4_t _k10n; float32x4_t _k11; float32x4_t _k11n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e4-%f4}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e6-%f6}, [%1 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {%e8-%f8}, [%2 :128]! \n" "pld [%3, #256] \n" "vld1.f32 {%e10-%f10}, [%3 :128]! \n" "vld1.f32 {%e5-%f5}, [%0 :128]! \n" "vld1.f32 {%e7-%f7}, [%1 :128]! \n" "vld1.f32 {%e9-%f9}, [%2 :128]! \n" "vld1.f32 {%e11-%f11}, [%3 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=r"(k10), // %2 "=r"(k11), // %3 "=w"(_k00), // %4 "=w"(_k00n), // %5 "=w"(_k01), // %6 "=w"(_k01n), // %7 "=w"(_k10), // %8 "=w"(_k10n), // %9 "=w"(_k11), // %10 "=w"(_k11n) // %11 : "0"(k00), "1"(k01), "2"(k10), "3"(k11) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, 
_r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; 
output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q10 \n" "vmla.f32 q9, q13, %q11 \n" "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q12 \n" "vmla.f32 q9, q15, %q13 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q14 \n" "vmla.f32 q11, q13, %q15 \n" "vmla.f32 q10, q14, %q16 \n" "vmla.f32 q11, q15, %q17 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(r1) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(r1), "w"(_k00), // %10 "w"(_k00n), // %11 "w"(_k01), // %12 "w"(_k01n), // %13 "w"(_k10), // %14 "w"(_k10n), // %15 "w"(_k11), // %16 "w"(_k11n) // %17 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); _output1_tm = vmlaq_f32(_output1_tm, _r1, _k11); _output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); 
output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q12 \n" "vmla.f32 q11, q13, %q13 \n" "vmla.f32 q10, q14, %q14 \n" "vmla.f32 q11, q15, %q15 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n), // %11 "w"(_k10), // %12 "w"(_k10n), // %13 "w"(_k11), // %14 "w"(_k11n) // %15 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; output1_tm[m] += r0[m] * k10[m]; output1_tm[m] += r1[m] * k11[m]; } r0 += 8; r1 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; k10 += 8; k11 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); const float* k10 = kernel1_tm.row(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k10 = vld1q_f32(k10); float32x4_t _k10n = vld1q_f32(k10+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k10; float32x4_t _k10n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! \n" : "=r"(k00), // %0 "=r"(k10), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k10), // %4 "=w"(_k10n) // %5 : "0"(k00), "1"(k10) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = 
vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _output1_tm = vld1q_f32(output1_tm); _output1_tmn = vld1q_f32(output1_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q10 \n" "vmla.f32 q11, q13, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%2 :128]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0) // %3 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k10), // %10 "w"(_k10n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _output1_tm = vld1q_f32(output1_tm); float32x4_t _output1_tmn = vld1q_f32(output1_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output1_tm = vmlaq_f32(_output1_tm, _r0, _k10); _output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); vst1q_f32(output1_tm, _output1_tm); vst1q_f32(output1_tm+4, _output1_tmn); output0_tm += 8; output1_tm += 8; #else asm volatile( "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! 
\n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%1, #256] \n" "vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm "vmla.f32 q10, q12, %q8 \n" "vmla.f32 q11, q13, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(r0) // %2 : "0"(output0_tm), "1"(output1_tm), "2"(r0), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k10), // %8 "w"(_k10n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output1_tm[m] += r0[m] * k10[m]; } r0 += 8; output0_tm += 8; output1_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k10 += 8; #endif // __aarch64__ #else k00 += 8; k10 += 8; #endif // __ARM_NEON } } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); out0_tm.fill(0.f); int q = 0; for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* k00 = kernel0_tm.row(q); const float* k01 = kernel0_tm.row(q+1); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); float32x4_t _k01 = vld1q_f32(k01); float32x4_t _k01n = vld1q_f32(k01+4); #else float32x4_t _k00; float32x4_t _k00n; float32x4_t _k01; float32x4_t _k01n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {%e4-%f4}, [%1 :128]! \n" "vld1.f32 {%e3-%f3}, [%0 :128]! \n" "vld1.f32 {%e5-%f5}, [%1 :128]! 
\n" : "=r"(k00), // %0 "=r"(k01), // %1 "=w"(_k00), // %2 "=w"(_k00n), // %3 "=w"(_k01), // %4 "=w"(_k01n) // %5 : "0"(k00), "1"(k01) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile #if __ARM_NEON int nn = tiles >> 2; int remain = tiles & 3; #else int remain = tiles; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; _output0_tm = vld1q_f32(output0_tm); _output0_tmn = vld1q_f32(output0_tm+4); _r0 = vld1q_f32(r0); _r0n = 
vld1q_f32(r0+4); _r1 = vld1q_f32(r1); _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0 "vst1.f32 {d16-d19}, [%1 :128]! \n" "pld [%1, #256] \n" "vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q8 \n" "vmla.f32 q9, q13, %q9 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q10 \n" "vmla.f32 q9, q15, %q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(r1) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(r1), "w"(_k00), // %8 "w"(_k00n), // %9 "w"(_k01), // %10 "w"(_k01n) // %11 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r1n = vld1q_f32(r1+4); r0 += 8; r1 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); _output0_tm = vmlaq_f32(_output0_tm, _r1, _k01); _output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q6 \n" "vmla.f32 q9, q13, %q7 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1 "vmla.f32 q8, q14, %q8 \n" "vmla.f32 q9, q15, %q9 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(r1) // %2 : "0"(output0_tm), "1"(r0), "2"(r1), "w"(_k00), // %6 "w"(_k00n), // %7 "w"(_k01), // %8 "w"(_k01n) // %9 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; output0_tm[m] += r1[m] * k01[m]; } r0 += 8; r1 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; k01 += 8; #endif // __aarch64__ #else k00 += 8; k01 += 8; #endif // __ARM_NEON } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); const float* k00 = kernel0_tm.row(q); float* output0_tm = out0_tm; for (int r=0; r<8; r++) { #if __ARM_NEON #if __aarch64__ float32x4_t _k00 = vld1q_f32(k00); float32x4_t _k00n = vld1q_f32(k00+4); #else float32x4_t _k00; float32x4_t _k00n; asm volatile( "pld [%0, #256] \n" "vld1.f32 {%e1-%f1}, [%0 :128]! \n" "vld1.f32 {%e2-%f2}, [%0 :128]! \n" : "=r"(k00), // %0 "=w"(_k00), // %1 "=w"(_k00n) // %2 : "0"(k00) : "cc", "memory" ); #endif // __aarch64__ #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ float32x4_t _output0_tm = vld1q_f32(output0_tm); float32x4_t _output0_tmn = vld1q_f32(output0_tm+4); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0+4); r0 += 8; _output0_tm = vmlaq_f32(_output0_tm, _r0, _k00); _output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n); vst1q_f32(output0_tm, _output0_tm); vst1q_f32(output0_tm+4, _output0_tmn); output0_tm += 8; #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0 "pld [%0, #256] \n" "vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm "vmla.f32 q8, q12, %q4 \n" "vmla.f32 q9, q13, %q5 \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00), // %4 "w"(_k00n) // %5 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else for (int m=0; m<8; m++) { output0_tm[m] += r0[m] * k00[m]; } r0 += 8; output0_tm += 8; #endif // __ARM_NEON } #if __ARM_NEON #if __aarch64__ k00 += 8; #endif // __aarch64__ #else k00 += 8; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2); const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3); const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4); const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5); const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6); const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7); float* output0 = out0.row(i * 6) + j * 6; const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 }; for (int m=0; m<8; m++) { const float* output0_tm = output0_tms[m]; float tmp024a = output0_tm[1] + output0_tm[2]; float tmp135a = output0_tm[1] - output0_tm[2]; float tmp024b = output0_tm[3] + output0_tm[4]; float tmp135b = output0_tm[3] - output0_tm[4]; float tmp024c = output0_tm[5] + output0_tm[6]; float tmp135c = output0_tm[5] - output0_tm[6]; tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; } for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + 
tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } #endif static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch); const int tiles = w_tm/8 * h_tm/8; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + 
(r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff+4); #endif // __ARM_NEON #pragma omp parallel for for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w*2; const float* r3 = r0 + w*3; #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0+4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1+4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2+4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3+4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = 
vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = 
vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w*4; r1 += w*4; r2 += w*4; r3 += w*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); for (int m=0; m+3<8; m+=4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2+4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = 
vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); 
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8*4; t1 += 8*4; t2 += 8*4; t3 += 8*4; r0_tm0_0 += img0_tm.w*tiles*2*4; r0_tm0_4 += img0_tm.w*tiles*2*4; r0_tm1_0 += img0_tm.w*tiles*2*4; r0_tm1_4 += img0_tm.w*tiles*2*4; r0_tm2_0 += img0_tm.w*tiles*2*4; r0_tm2_4 += img0_tm.w*tiles*2*4; r0_tm3_0 += img0_tm.w*tiles*2*4; r0_tm3_4 += img0_tm.w*tiles*2*4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, 
q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m] "vmov q3, q7 \n"// use q7 "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n"// tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2); float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3); float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4); float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5); float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6); float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7); int step = img0_tm.w*tiles*2*4*4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n"// use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n"// use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n"// use q7 "vadd.f32 q6, q12, q6 \n"// use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m=0; m<8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + 
tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch); const int tiles = h_tm/8 * w_tm/8; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const float* ktm = kernel_tm.channel(pp); out0_tm.fill(0.f); out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; #if __ARM_NEON && __aarch64__ 
for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); const float* r2 = bottom_blob_tm.channel(q+2); const float* r3 = bottom_blob_tm.channel(q+3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n"// v0 v1 v2 v3 = _k00 _k01 _k02 _k03 "prfm pldl1keep, [%8, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n"// v4 v5 v6 v7 = _k10 _k11 _k12 _k13 "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"// v8 v9 v10 v11 = _k20 _k21 _k22 _k23 "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"// v12 v13 v14 v15 = _k30 _k31 _k32 _k33 // tile loop "lsr w1, %w18, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n"// "ld1 {v16.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "add x4, %0, #16 \n"// x4 = %0 next "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "add x5, %1, #16 \n"// x5 = %1 next "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "add x6, %2, #16 \n"// x6 = %2 next "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "add x7, %3, #16 \n"// x7 = %3 next "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 
{v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "add %0, %0, #32 \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "add %1, %1, #32 \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "add %2, %2, #32 \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" "add %3, %3, #32 \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "st1 {v24.4s}, [x4] \n" "add x4, x4, #32 \n" "fmla v20.4s, v16.4s, v0.4s \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v22.4s, v16.4s, v8.4s \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "st1 {v25.4s}, [x5] \n" "add x5, x5, #32 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, 
#128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "st1 {v26.4s}, [x6] \n" "add x6, x6, #32 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "st1 {v27.4s}, [x7] \n" "add x7, x7, #32 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "st1 {v21.4s}, [%1] \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "st1 {v22.4s}, [%2] \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "st1 {v23.4s}, [%3] \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" "st1 {v24.4s}, [x4], #16 \n" "mov %0, x4 \n" "st1 {v25.4s}, [x5], #16 \n" "mov %1, x5 \n" "subs w1, w1, #1 \n" "st1 {v26.4s}, [x6], #16 \n" "mov %2, x6 \n" "st1 {v27.4s}, [x7], #16 \n" "mov %3, x7 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w18, #3 \n"// w1 = remain = tiles & 3; "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm 
pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" "st1 {v20.4s}, [%0], #16 \n" "st1 {v21.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v22.4s}, [%2], #16 \n" "st1 {v23.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(ktm) // %8 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(ktm), "r"(tiles) // %18 : "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif // __ARM_NEON && __aarch64__ for (; q+1<inch; q+=2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q+1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* 
output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4s, v1.4s}, [%6], #32 \n"// v0 v1 = _k00 _k01 "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4s, v3.4s}, [%6], #32 \n"// v2 v3 = _k10 _k11 "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n"// v4 v5 = _k20 _k21 "prfm pldl1keep, [%6, #256] \n" "ld1 {v6.4s, v7.4s}, [%6], #32 \n"// v6 v7 = _k30 _k31 // tile loop "lsr w1, %w14, #2 \n"// w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, 
[%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w14, #3 \n"// w1 = remain = tiles & 3; "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, 
[%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%6, #256] \n" "vld1.f32 {d0-d3}, [%6 :128]! \n"// q0 q1 = _k00 _k01 "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6 :128]! \n"// q2 q3 = _k10 _k11 "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! \n"// q4 q5 = _k20 _k21 "pld [%6, #256] \n" "vld1.f32 {d12-d15}, [%6 :128]! \n"// q6 q7 = _k30 _k31 // tile loop "lsr r1, %14, #2 \n"// r1 = nn = tiles >> 2 "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "1: \n" "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! 
\n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! 
\n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and r1, %14, #3 \n"// r1 = remain = tiles & 3; "cmp r1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 3b \n" //END remain loop "4: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output0_tm[m] += r1[m] * ktm[4 +m]; output1_tm[m] += r0[m] * ktm[8 +m]; output1_tm[m] += r1[m] * ktm[12+m]; output2_tm[m] += r0[m] * ktm[16+m]; output2_tm[m] += r1[m] * ktm[20+m]; output3_tm[m] += r0[m] * ktm[24+m]; output3_tm[m] += r1[m] * ktm[28+m]; } r0 += 4; r1 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 32; } #endif // __ARM_NEON } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n"// w0 = r = 16 "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n"// v0 v1 = _k00 _k10 "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n"// v2 v3 = _k20 _k30 // tile loop "mov w1, %w12 \n"// w1 = tiles "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v17.4s}, [%0] \n" "fmla v17.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "fmla v18.4s, v16.4s, v1.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "fmla v19.4s, v16.4s, v2.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v20.4s, v16.4s, v3.4s \n" "st1 {v17.4s}, [%0], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "subs w1, 
w1, #1 \n" "st1 {v19.4s}, [%2], #16 \n" "st1 {v20.4s}, [%3], #16 \n" "bne 1b \n" //END tile loop "2: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20" ); #else asm volatile( "mov r0, #16 \n"// r0 = r = 16 "0: \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n"// q0 q1 = _k00 _k10 "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n"// q2 q3 = _k20 _k30 // tile loop "mov r1, %12 \n"// r1 = tiles "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm "vmla.f32 q9, q12, q1 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm "vmla.f32 q10, q12, q2 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm "vmla.f32 q11, q12, q3 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "bne 1b \n" //END tile loop "2: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ); #endif // __aarch64__ #else for (int r=0; r<16; r++) { for (int t=0; t<tiles; t++) { for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[0 +m]; output1_tm[m] += r0[m] * ktm[4 +m]; output2_tm[m] += r0[m] * ktm[8 +m]; output3_tm[m] += r0[m] * ktm[12+m]; } r0 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 16; } #endif // __ARM_NEON } } #pragma omp parallel for for (int p = remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start); out0_tm.fill(0.f); int q = 0; for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; for (int r=0; r<16; r++) { #if __ARM_NEON float32x4_t _k00 = vld1q_f32(ktm); ktm += 4; #endif // __ARM_NEON // tile for (int i=0; i<tiles; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v17.4s, %4.4s \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "v16", "v17" ); #else asm volatile( "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm "vmla.f32 q8, q9, %q4 \n" "vst1.f32 {d16-d17}, [%0 :128]! 
\n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "q8", "q9" ); #endif // __aarch64__ #else for (int m=0; m<4; m++) { output0_tm[m] += r0[m] * ktm[m]; } r0 += 4; output0_tm += 4; #endif // __ARM_NEON } #if !__ARM_NEON ktm += 4; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = { 4.f, 8.f, 16.f, 32.f }; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { #if __ARM_NEON const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles); const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2); const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3); const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4); const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5); const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6); const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7); #if __aarch64__ for (int m=0; m+3<8; m+=4) { float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0); float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4); float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0); float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4); float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0); float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4); float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0); float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4); float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123); float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567); float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123); float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567); // no vswp intrinsic :( float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), 
vget_high_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1])); float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1])); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = 
vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0_0 += out0_tm.w * tiles * 2*4; output0_tm0_4 += out0_tm.w * tiles * 2*4; output0_tm1_0 += out0_tm.w * tiles * 2*4; output0_tm1_4 += out0_tm.w * tiles * 2*4; output0_tm2_0 += out0_tm.w * tiles * 2*4; output0_tm2_4 += out0_tm.w * tiles * 2*4; output0_tm3_0 += out0_tm.w * tiles * 2*4; output0_tm3_4 += out0_tm.w * tiles * 2*4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m=0; m+1<6; m+=2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0+4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1+4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, 
_tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8*2; t1 += 8*2; output0 += outw*2; output1 += outw*2; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 2*4 *4; asm volatile( // loop0 "vld1.f32 {d16-d17}, [%2], %21 \n" "vld1.f32 {d18-d19}, [%3], %21 \n" "vld1.f32 {d20-d21}, [%4], %21 \n" "vld1.f32 {d22-d23}, [%5], %21 \n" "vld1.f32 {d24-d25}, [%6], %21 \n" "vld1.f32 {d26-d27}, [%7], %21 \n" "vld1.f32 {d28-d29}, [%8], %21 \n" "vld1.f32 {d30-d31}, [%9], %21 \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" 
"vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16-d17}, [%2] \n" "vld1.f32 {d18-d19}, [%3] \n" "vld1.f32 {d20-d21}, [%4] \n" "vld1.f32 {d22-d23}, [%5] \n" "vld1.f32 {d24-d25}, [%6] \n" "vld1.f32 {d26-d27}, [%7] \n" "vld1.f32 {d28-d29}, [%8] \n" "vld1.f32 {d30-d31}, [%9] \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 
\n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm0_4), // %3 "=r"(output0_tm1_0), // %4 "=r"(output0_tm1_4), // %5 "=r"(output0_tm2_0), // %6 "=r"(output0_tm2_4), // %7 "=r"(output0_tm3_0), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm0_4), "4"(output0_tm1_0), "5"(output0_tm1_4), "6"(output0_tm2_0), "7"(output0_tm2_4), "8"(output0_tm3_0), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw*2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, 
d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, 
q10 \n"// q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n"// _bias0 "vadd.f32 d20, d20, %P9 \n"// _bias0 "vadd.f32 d17, d17, %P9 \n"// _bias0 "vadd.f32 d21, d21, %P9 \n"// _bias0 "vadd.f32 d18, d18, %P9 \n"// _bias0 "vadd.f32 d22, d22, %P9 \n"// _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j); const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles); for (int m=0; m<8; m++) { float tmp024a = output0_tm_0[1] + output0_tm_0[2]; float tmp135a = output0_tm_0[1] - output0_tm_0[2]; float tmp024b = output0_tm_0[3] + output0_tm_4[0]; float tmp135b = output0_tm_0[3] - output0_tm_4[0]; float tmp024c = output0_tm_4[1] + output0_tm_4[2]; float tmp135c = output0_tm_4[1] - output0_tm_4[2]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; 
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 2; output0_tm_4 += out0_tm.w * tiles * 2; } float* output0 = out0.row(i * 6) + j * 6; for (int m=0; m<6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } #endif // __ARM_NEON } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w); } static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p+1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p*inch*9; const float* k1 = kernel + (p+1)*inch*9; for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0+3); float32x4_t _k06 = vld1q_f32(k0+6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1+3); float32x4_t _k16 = vld1q_f32(k1+6); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0 "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v6.4s}, [%1] \n"// v6 = _sum0 "fmul v12.4s, v8.4s, %12.s[0] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v7.4s}, [%2] \n"// v7 = _sum1 "fmul v13.4s, v8.4s, %15.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld2 {v10.4s, v11.4s}, [%3] \n"// v10 "fmla v6.4s, v9.4s, %12.s[1] \n" "ext v14.16b, v8.16b, v10.16b, #4\n" "fmla v7.4s, v9.4s, %15.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1 "fmla v12.4s, v14.4s, %12.s[2] \n" "fmla v13.4s, v14.4s, %15.s[2] \n" "prfm pldl1keep, [%4, #128] \n" "ld2 {v10.4s, v11.4s}, [%4] \n" "fmla v6.4s, v8.4s, %13.s[0] \n" "fmla v7.4s, v8.4s, %16.s[0] \n" "ext v14.16b, v8.16b, v10.16b, #4\n" "fmla v12.4s, v9.4s, %13.s[1] \n" "fmla v13.4s, v9.4s, %16.s[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2 "fmla v6.4s, v14.4s, %13.s[2] \n" "fmla v7.4s, v14.4s, %16.s[2] \n" "prfm pldl1keep, [%5, #128] \n" "ld2 {v10.4s, v11.4s}, [%5] \n" "fmla v12.4s, v8.4s, %14.s[0] \n" "fmla v13.4s, v8.4s, %17.s[0] \n" "ext v14.16b, v8.16b, v10.16b, #4\n" "fmla v6.4s, v9.4s, %14.s[1] \n" "fmla v7.4s, v9.4s, %17.s[1] \n" 
"fmla v12.4s, v14.4s, %14.s[2] \n" "fmla v13.4s, v14.4s, %17.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0 "fadd v6.4s, v6.4s, v12.4s \n" "fadd v7.4s, v7.4s, v13.4s \n" "subs %w0, %w0, #1 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v7.4s}, [%2], #16 \n" "bne 0b \n" "sub %3, %3, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%3, #256] \n" "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0 "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0 "vmul.f32 q12, q8, %e12[0] \n" "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1 "vmul.f32 q13, q8, %e15[0] \n" "pld [%3, #128] \n" "vld2.f32 {d20-d21}, [%3] \n"// q10 "vmla.f32 q6, q9, %e12[1] \n" "vext.32 q11, q8, q10, #1 \n" "vmla.f32 q7, q9, %e15[1] \n" "pld [%4, #256] \n" "vld2.f32 {d16-d19}, [%4]! \n"// r1 "vmla.f32 q12, q11, %f12[0] \n" "vmla.f32 q13, q11, %f15[0] \n" "pld [%4, #128] \n" "vld2.f32 {d20-d21}, [%4] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q11, q8, q10, #1 \n" "vmla.f32 q12, q9, %e13[1] \n" "vmla.f32 q13, q9, %e16[1] \n" "pld [%5, #256] \n" "vld2.f32 {d16-d19}, [%5]! \n"// r2 "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "pld [%5, #128] \n" "vld2.f32 {d20-d21}, [%5] \n" "vmla.f32 q12, q8, %e14[0] \n" "vmla.f32 q13, q8, %e17[0] \n" "vext.32 q11, q8, q10, #1 \n" "vmla.f32 q6, q9, %e14[1] \n" "vmla.f32 q7, q9, %e17[1] \n" "vmla.f32 q12, q11, %f14[0] \n" "vmla.f32 q13, q11, %f17[0] \n" "pld [%3, #256] \n" "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0 "vadd.f32 q6, q6, q12 \n" "vadd.f32 q7, q7, q13 \n" "subs %0, #1 \n" "vst1.f32 {d12-d13}, [%1]! 
\n" "vst1.f32 {d14-d15}, [%2]! \n" "bne 0b \n" "sub %3, #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr0++; outptr1++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9; k1 += 9; } } #pragma omp parallel for for (int 
p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p*inch*9; for (int q=0; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k3456 = vld1q_f32(k1); float32x4_t _k6789 = vld1q_f32(k2); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "fmla v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmul v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), 
// %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" ); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmla.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmul.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } kernel0 += 9; } } }
opi.c
/*
 * opi.c — estimate pi by Monte Carlo integration with OpenMP.
 *
 * Throws Ntests uniform random points at the unit square and counts how
 * many land inside the quarter circle x^2 + y^2 < 1; pi ~= 4 * hits/Ntests.
 *
 * Usage: ./opi <num_threads>
 *
 * Fixes vs. the original:
 *  - `omp_get_wtime() - stime` referenced an undeclared `stime`; the start
 *    timestamp is `ttime` (did not compile).
 *  - `struct drand48_data buff` was declared before the parallel region, so
 *    every thread seeded and drew from the SAME shared RNG state — a data
 *    race. The state must be private: declare it inside the parallel block.
 *  - `argv[1]` was read without checking argc.
 */
#define _GNU_SOURCE  /* srand48_r / drand48_r are glibc extensions */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv){

  if (argc < 2) {
    fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
    return 1;
  }
  int Nthreads = atoi(argv[1]);
  if (Nthreads < 1) Nthreads = 1;     /* guard bad/zero input */

  double ttime = omp_get_wtime();     /* start timestamp */

  long long int Ninside = 0;          /* points inside the 1/4 circle */
  const long long int Ntests = 1000000000;

  omp_set_num_threads(Nthreads);

  double estpi = 0;

  #pragma omp parallel num_threads(Nthreads) reduction(+ : Ninside)
  {
    /* Per-thread RNG state: declared inside the parallel region so each
       thread owns its own buffer (a shared one would be a data race). */
    struct drand48_data buff;
    srand48_r(12345*omp_get_thread_num(), &buff);

    /* The omp-for loop variable is implicitly private. */
    #pragma omp for
    for (long long n = 0; n < Ntests; ++n) {
      double x;
      double y;
      drand48_r(&buff, &x);
      drand48_r(&buff, &y);
      if (x*x + y*y < 1) {
        ++Ninside;
      }
    }
  }

  estpi = 4.*(Ninside/(double)Ntests);

  double time = omp_get_wtime() - ttime;  /* fix: was undeclared `stime` */

  printf("estPi = %lf\n", estpi);
  printf("time=%lf\n", time);
  return 0;
}
libomp_interface.h
// clang-format off
// This file does not contain any code; it just contains additional text and formatting
// for doxygen.


//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

/*! @mainpage LLVM&nbsp; OpenMP* Runtime Library Interface
@section sec_intro Introduction

This document describes the interface provided by the LLVM &nbsp;OpenMP\other runtime library
to the compiler. Routines that are directly called as simple functions by user code are not
currently described here, since their definition is in the OpenMP specification available from
http://openmp.org

The aim here is to explain the interface from the compiler to the runtime.

The overall design is described, and each function in the interface has its own description.
(At least, that's the ambition; we may not be there yet.)

@section sec_building Quickly Building the Runtime
For the impatient, we cover building the runtime as the first topic here.

CMake is used to build the OpenMP runtime. For details and a full list of options for the
CMake build system, see <tt>README.rst</tt> in the source code repository. These
instructions will provide the most typical build.

In-LLVM-tree build:
@code $ cd where-you-want-to-live Check out openmp into llvm/projects $ cd where-you-want-to-build $ mkdir build && cd build $ cmake path/to/llvm -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make omp @endcode Out-of-LLVM-tree build: @code $ cd where-you-want-to-live Check out openmp $ cd where-you-want-to-live/openmp $ mkdir build && cd build $ cmake path/to/openmp -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler> $ make @endcode @section sec_supported Supported RTL Build Configurations The architectures supported are IA-32 architecture, Intel&reg;&nbsp; 64, and Intel&reg;&nbsp; Many Integrated Core Architecture. The build configurations supported are shown in the table below. <table border=1> <tr><th> <th>icc/icl<th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7) <tr><td>FreeBSD\other<td>Yes(1,5)<td>Yes(2,4)<td>Yes(4,6,7,8) <tr><td>OS X\other<td>Yes(1,3,4)<td>No<td>Yes(4,6,7) <tr><td>Windows\other OS<td>Yes(1,4)<td>No<td>No </table> (1) On IA-32 architecture and Intel&reg;&nbsp; 64, icc/icl versions 12.x are supported (12.1 is recommended).<br> (2) gcc version 4.7 is supported.<br> (3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br> (4) Intel&reg;&nbsp; Many Integrated Core Architecture not supported.<br> (5) On Intel&reg;&nbsp; Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.<br> (6) Clang\other version 3.3 is supported.<br> (7) Clang\other currently does not offer a software-implemented 128 bit extended precision type. Thus, all entry points reliant on this type are removed from the library and cannot be called in the user program. The following functions are not available: @code __kmpc_atomic_cmplx16_* __kmpc_atomic_float16_* __kmpc_atomic_*_fp @endcode (8) Community contribution provided AS IS, not tested by Intel. 
Supported Architectures: IBM(R) Power 7 and Power 8 <table border=1> <tr><th> <th>gcc<th>clang <tr><td>Linux\other OS<td>Yes(1,2)<td>Yes(3,4) </table> (1) On Power 7, gcc version 4.8.2 is supported.<br> (2) On Power 8, gcc version 4.8.2 is supported.<br> (3) On Power 7, clang version 3.7 is supported.<br> (4) On Power 8, clang version 3.7 is supported.<br> @section sec_frontend Front-end Compilers that work with this RTL The following compilers are known to do compatible code generation for this RTL: icc/icl, gcc. Code generation is discussed in more detail later in this document. @section sec_outlining Outlining The runtime interface is based on the idea that the compiler "outlines" sections of code that are to run in parallel into separate functions that can then be invoked in multiple threads. For instance, simple code like this @code void foo() { #pragma omp parallel { ... do something ... } } @endcode is converted into something that looks conceptually like this (where the names used are merely illustrative; the real library function names will be used later after we've discussed some more issues...) @code static void outlinedFooBody() { ... do something ... } void foo() { __OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name! } @endcode @subsection SEC_SHAREDVARS Addressing shared variables In real uses of the OpenMP\other API there are normally references from the outlined code to shared variables that are in scope in the containing function. Therefore the containing function must be able to address these variables. The runtime supports two alternate ways of doing this. @subsubsection SEC_SEC_OT Current Technique The technique currently supported by the runtime library is to receive a separate pointer to each shared variable that can be accessed from the outlined function. This is what is shown in the example below. We hope soon to provide an alternative interface to support the alternate implementation described in the next section. 
The alternative implementation has performance advantages for small parallel regions that have many shared variables. @subsubsection SEC_SEC_PT Future Technique The idea is to treat the outlined function as though it were a lexically nested function, and pass it a single argument which is the pointer to the parent's stack frame. Provided that the compiler knows the layout of the parent frame when it is generating the outlined function it can then access the up-level variables at appropriate offsets from the parent frame. This is a classical compiler technique from the 1960s to support languages like Algol (and its descendants) that support lexically nested functions. The main benefit of this technique is that there is no code required at the fork point to marshal the arguments to the outlined function. Since the runtime knows statically how many arguments must be passed to the outlined function, it can easily copy them to the thread's stack frame. Therefore the performance of the fork code is independent of the number of shared variables that are accessed by the outlined function. If it is hard to determine the stack layout of the parent while generating the outlined code, it is still possible to use this approach by collecting all of the variables in the parent that are accessed from outlined functions into a single `struct` which is placed on the stack, and whose address is passed to the outlined functions. In this way the offsets of the shared variables are known (since they are inside the struct) without needing to know the complete layout of the parent stack-frame. From the point of view of the runtime either of these techniques is equivalent, since in either case it only has to pass a single argument to the outlined function to allow it to access shared variables. A scheme like this is how gcc\other generates outlined functions. 
@section SEC_INTERFACES Library Interfaces The library functions used for specific parts of the OpenMP\other language implementation are documented in different modules. - @ref BASIC_TYPES fundamental types used by the runtime in many places - @ref DEPRECATED functions that are in the library but are no longer required - @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime - @ref PARALLEL functions for implementing `omp parallel` - @ref THREAD_STATES functions for supporting thread state inquiries - @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections` - @ref THREADPRIVATE functions to support thread private data, copyin etc - @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc - @ref ATOMIC_OPS functions to support atomic operations - @ref STATS_GATHERING macros to support developer profiling of libomp - Documentation on tasking has still to be written... @section SEC_EXAMPLES Examples @subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example This example shows the code generated for a parallel for with reduction and dynamic scheduling. @code extern float foo( void ); int main () { int i; float r = 0.0; #pragma omp parallel for schedule(dynamic) reduction(+:r) for ( i = 0; i < 10; i ++ ) { r += foo(); } } @endcode The transformed code looks like this. @code extern float foo( void ); int main () { static int zero = 0; auto int gtid; auto float r = 0.0; __kmpc_begin( & loc3, 0 ); // The gtid is not actually required in this example so could be omitted; // We show its initialization here because it is often required for calls into // the runtime and should be locally cached like this. 
    gtid = __kmpc_global_thread_num( & loc3 );
    __kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
    __kmpc_end( & loc0 );
    return 0;
}

struct main_10_reduction_t_5 { float r_10_rpr; };

static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
                      // if compiler has generated an atomic reduction.

void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp ) {
    auto int i_7_pr;
    auto int lower, upper, liter, incr;
    auto struct main_10_reduction_t_5 reduce;
    reduce.r_10_rpr = 0.F;
    liter = 0;
    __kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
    while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) ) {
        for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
            reduce.r_10_rpr += foo();
    }
    switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) ) {
        case 1:
            *r_7_shp += reduce.r_10_rpr;
            __kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
            break;
        case 2:
            __kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
            break;
        default:;
    }
}

void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs,
                       struct main_10_reduction_t_5 *reduce_rhs )
{
    reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode

@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.

@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.

@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.

@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.

@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.

@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same, so they are only described once. Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed, since the iterations to be executed by any give thread can be determined as soon as the loop parameters are known. Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions. The init function is called once in each thread outside the loop, while the next function is called each time that the previous chunk of work has been exhausted. @defgroup SYNCHRONIZATION Synchronization These functions are used for implementing barriers. @defgroup THREADPRIVATE Thread private data support These functions support copyin/out and thread private data. @defgroup STATS_GATHERING Statistics Gathering from OMPTB These macros support profiling the libomp library. Use --stats=on when building with build.pl to enable and then use the KMP_* macros to profile (through counts or clock ticks) libomp during execution of an OpenMP program. @section sec_stats_env_vars Environment Variables This section describes the environment variables relevant to stats-gathering in libomp @code KMP_STATS_FILE @endcode This environment variable is set to an output filename that will be appended *NOT OVERWRITTEN* if it exists. If this environment variable is undefined, the statistics will be output to stderr @code KMP_STATS_THREADS @endcode This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics. @defgroup TASKING Tasking support These functions support tasking constructs. 
@defgroup USER User visible functions These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces. */
adjointadvection.h
//*****************************************************************************
//  Title   :   src/equation/adjointadvection.h
//  Author  :   Tanabe Yuta
//  Date    :   2021/08/03
//  Copyright   :   (C)2021 TanabeYuta
//*****************************************************************************

#pragma once
#include <cmath>
#include "adjointnavierstokes.h"

#ifdef _USE_AVX_DEFINES
#include "../equation_avx/adjointadvection_avx.h"
#endif

namespace {
    //  Tags for the two adjoint thermal boundary-condition sets below
    //  (presumably selected by callers; verify usage at call sites).
    const int SetT = 1;
    const int SetQ = 2;
}

namespace PANSLBM2 {
    namespace AAD {
        //  Function of updating macroscopic values of AAD for 2D
        //  Accumulates the adjoint temperature (_item) and adjoint heat flux
        //  (_iqx, _iqy) moments from the adjoint distribution _g0/_g at _idx.
        template<class T, template<class>class Q>
        void Macro(T &_item, T &_iqx, T &_iqy, const T *_g0, const T *_g, int _idx) {
            _item = Q<T>::ei[0]*_g0[_idx];
            _iqx = T();
            _iqy = T();
            for (int c = 1; c < Q<T>::nc; ++c) {
                T gei = Q<T>::ei[c]*_g[Q<T>::IndexF(_idx, c)];
                _item += gei;
                _iqx += gei*Q<T>::cx[c];
                _iqy += gei*Q<T>::cy[c];
            }
        }

        //  Function of updating macroscopic values of AAD for 3D
        template<class T, template<class>class Q>
        void Macro(T &_item, T &_iqx, T &_iqy, T &_iqz, const T *_g0, const T *_g, int _idx) {
            _item = Q<T>::ei[0]*_g0[_idx];
            _iqx = T();
            _iqy = T();
            _iqz = T();
            for (int c = 1; c < Q<T>::nc; ++c) {
                T gei = Q<T>::ei[c]*_g[Q<T>::IndexF(_idx, c)];
                _item += gei;
                _iqx += gei*Q<T>::cx[c];
                _iqy += gei*Q<T>::cy[c];
                _iqz += gei*Q<T>::cz[c];
            }
        }

        //  Function of getting equilibrium of AAD for 2D
        //  Note: every direction receives the same value (adjoint equilibrium
        //  is direction-independent up to this linear term).
        template<class T, template<class>class Q>
        void Equilibrium(T *_geq, T _item, T _iqx, T _iqy, T _ux, T _uy) {
            T coef = _item + 3.0*(_ux*_iqx + _uy*_iqy);
            for (int c = 0; c < Q<T>::nc; ++c) {
                _geq[c] = coef;
            }
        }

        //  Function of getting equilibrium of AAD for 3D
        template<class T, template<class>class Q>
        void Equilibrium(T *_geq, T _item, T _iqx, T _iqy, T _iqz, T _ux, T _uy, T _uz) {
            T coef = _item + 3.0*(_ux*_iqx + _uy*_iqy + _uz*_iqz);
            for (int c = 0; c < Q<T>::nc; ++c) {
                _geq[c] = coef;
            }
        }

        //  Function of applying external force with Brinkman model and advection of AAD for 2D
        //  Adds the coupled Brinkman/advection source term into the adjoint
        //  flow distribution _f0/_f at _idx.
        template<class T, template<class>class P>
        void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _imx, T _imy, T _tem, T _iqx, T _iqy, T _omegag, T *_f0, T *_f, T _alpha, int _idx) {
            T coef = 3.0/(_rho + _alpha);
            _f0[_idx] += coef*( -_ux*(_tem*_iqx*_omegag - _alpha*_imx) + -_uy*(_tem*_iqy*_omegag - _alpha*_imy) );
            for (int c = 1; c < P<T>::nc; ++c) {
                _f[P<T>::IndexF(_idx, c)] += coef*( (P<T>::cx[c] - _ux)*(_tem*_iqx*_omegag - _alpha*_imx) + (P<T>::cy[c] - _uy)*(_tem*_iqy*_omegag - _alpha*_imy) );
            }
        }

        //  Function of applying external force with Brinkman model and advection of AAD for 3D
        template<class T, template<class>class P>
        void ExternalForceBrinkman(T _rho, T _ux, T _uy, T _uz, T _imx, T _imy, T _imz, T _tem, T _iqx, T _iqy, T _iqz, T _omegag, T *_f0, T *_f, T _alpha, int _idx) {
            T coef = 3.0/(_rho + _alpha);
            _f0[_idx] += coef*( -_ux*(_tem*_iqx*_omegag - _alpha*_imx) + -_uy*(_tem*_iqy*_omegag - _alpha*_imy) + -_uz*(_tem*_iqz*_omegag - _alpha*_imz) );
            for (int c = 1; c < P<T>::nc; ++c) {
                _f[P<T>::IndexF(_idx, c)] += coef*( (P<T>::cx[c] - _ux)*(_tem*_iqx*_omegag - _alpha*_imx) + (P<T>::cy[c] - _uy)*(_tem*_iqy*_omegag - _alpha*_imy) + (P<T>::cz[c] - _uz)*(_tem*_iqz*_omegag - _alpha*_imz) );
            }
        }

        //  Function of applying external force with heat exchange of AAD for 2D/3D
        //  Same scalar correction subtracted from every direction of _g0/_g.
        template<class T, template<class>class Q>
        void ExternalForceHeatExchange(T _item, T *_g0, T *_g, T _beta, int _idx) {
            T coef = _beta*(1.0 + _item)/(1.0 + _beta);
            _g0[_idx] -= coef;
            for (int c = 1; c < Q<T>::nc; ++c) {
                _g[Q<T>::IndexF(_idx, c)] -= coef;
            }
        }

        //  Function of applying external force with natural convection of AAD for 2D
        template<class T, template<class>class Q>
        void ExternalForceNaturalConvection(T _imx, T _imy, T _gx, T _gy, T *_g0, T *_g, int _idx) {
            T coef = 3.0*(_imx*_gx + _imy*_gy);
            _g0[_idx] += coef;
            for (int c = 1; c < Q<T>::nc; ++c) {
                _g[Q<T>::IndexF(_idx, c)] += coef;
            }
        }

        //  Function of applying external force with natural convection of AAD for 3D
        template<class T, template<class>class Q>
        void ExternalForceNaturalConvection(T _imx, T _imy, T _imz, T _gx, T _gy, T _gz, T *_g0, T *_g, int _idx) {
            T coef = 3.0*(_imx*_gx + _imy*_gy + _imz*_gz);
            _g0[_idx] += coef;
            for (int c = 1; c < Q<T>::nc; ++c) {
                _g[Q<T>::IndexF(_idx, c)] += coef;
            }
        }

        //  Function of Update macro, External force(Brinkman, Heat exchange) and Collide of AAD for 2D
        //  _p: adjoint flow lattice, _q: adjoint thermal lattice.  Macros are
        //  recomputed after each forcing step because the force modifies the
        //  distributions they are derived from.
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideHeatExchange(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, T *_ip, T *_iux, T *_iuy, T *_imx, T *_imy, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, const T *_beta, T _diffusivity, bool _issave = false
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
            T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                //  Update macro
                T ip, iux, iuy, imx, imy;
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy;
                Macro<T, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                ExternalForceHeatExchange<T, Q>(item, _q.f0, _q.f, _beta[idx], idx);
                Macro<T, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                }

                //  Collide (BGK relaxation towards the adjoint equilibria)
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of Update macro, External force(Brinkman, Heat exchange) and Collide of AAD for 3D
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideHeatExchange(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, const T *_uz, T *_ip, T *_iux, T *_iuy, T *_iuz, T *_imx, T *_imy, T *_imz, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, T *_iqz, const T *_beta, T _diffusivity, bool _issave = false
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
            T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                //  Update macro
                T ip, iux, iuy, iuz, imx, imy, imz;
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy, iqz;
                Macro<T, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                ExternalForceHeatExchange<T, Q>(item, _q.f0, _q.f, _beta[idx], idx);
                Macro<T, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _iuz[idx] = iuz;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _imz[idx] = imz;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    _iqz[idx] = iqz;
                }

                //  Collide
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of Update macro and Collide of AAD for 2D
        //  Forced-convection variant: _diffusivity varies per cell, and the
        //  optional _ig buffer stores the pre-collision adjoint thermal
        //  distribution (Q<T>::nc entries per cell).
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideForceConvection(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, T *_ip, T *_iux, T *_iuy, T *_imx, T *_imy, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, const T *_diffusivity, bool _issave = false, T *_ig = nullptr
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;

                //  Update macro
                T ip, iux, iuy, imx, imy;
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy;
                Macro<T, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    if (_ig) {
                        int offsetf = Q<T>::nc*idx;
                        _ig[offsetf] = _q.f0[idx];
                        for (int c = 1; c < Q<T>::nc; ++c) {
                            _ig[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                        }
                    }
                }

                //  Collide
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of Update macro and Collide of AAD for 3D
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideForceConvection(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, const T *_uz, T *_ip, T *_iux, T *_iuy, T *_iuz, T *_imx, T *_imy, T *_imz, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, T *_iqz, const T *_diffusivity, bool _issave = false, T *_ig = nullptr
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;

                //  Update macro
                T ip, iux, iuy, iuz, imx, imy, imz;
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy, iqz;
                Macro<T, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);

                //  Save macro if need
                //  NOTE(review): unlike the 3D HeatExchange/NaturalConvection
                //  variants, _iuz is never written here even though the
                //  pointer is taken — likely a missing `_iuz[idx] = iuz;`.
                //  Confirm with callers before relying on _iuz output.
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _imz[idx] = imz;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    _iqz[idx] = iqz;
                    if (_ig) {
                        int offsetf = Q<T>::nc*idx;
                        _ig[offsetf] = _q.f0[idx];
                        for (int c = 1; c < Q<T>::nc; ++c) {
                            _ig[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                        }
                    }
                }

                //  Collide
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of Update macro and Collide of AAD for 2D
        //  Natural-convection variant: adds the buoyancy adjoint source
        //  (_gx, _gy) into the thermal lattice before colliding.
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideNaturalConvection(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, T *_ip, T *_iux, T *_iuy, T *_imx, T *_imy, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, const T *_diffusivity, T _gx, T _gy, bool _issave = false, T *_ig = nullptr
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;

                //  Update macro
                T ip, iux, iuy, imx, imy;
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy;
                Macro<T, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], imx, imy, _tem[idx], iqx, iqy, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, imx, imy, _rho[idx], _ux[idx], _uy[idx], _p.f0, _p.f, idx);
                ExternalForceNaturalConvection<T, Q>(imx, imy, _gx, _gy, _q.f0, _q.f, idx);
                Macro<T, Q>(item, iqx, iqy, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    if (_ig) {
                        int offsetf = Q<T>::nc*idx;
                        _ig[offsetf] = _q.f0[idx];
                        for (int c = 1; c < Q<T>::nc; ++c) {
                            _ig[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                        }
                    }
                }

                //  Collide
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], ip, iux, iuy);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, _ux[idx], _uy[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of Update macro and Collide of AAD for 3D
        template<class T, template<class>class P, template<class>class Q>
        void MacroBrinkmanCollideNaturalConvection(
            P<T>& _p, const T *_rho, const T *_ux, const T *_uy, const T *_uz, T *_ip, T *_iux, T *_iuy, T *_iuz, T *_imx, T *_imy, T *_imz, const T *_alpha, T _viscosity,
            Q<T>& _q, const T *_tem, T *_item, T *_iqx, T *_iqy, T *_iqz, const T *_diffusivity, T _gx, T _gy, T _gz, bool _issave = false, T *_ig = nullptr
        ) {
            T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
            for (int idx = 0; idx < _p.nxyz; ++idx) {
                T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;

                //  Update macro
                T ip, iux, iuy, iuz, imx, imy, imz;
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                T item, iqx, iqy, iqz;
                Macro<T, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  External force with Brinkman model
                ExternalForceBrinkman<T, P>(_rho[idx], _ux[idx], _uy[idx], _uz[idx], imx, imy, imz, _tem[idx], iqx, iqy, iqz, omegag, _p.f0, _p.f, _alpha[idx], idx);
                ANS::Macro<T, P>(ip, iux, iuy, iuz, imx, imy, imz, _rho[idx], _ux[idx], _uy[idx], _uz[idx], _p.f0, _p.f, idx);
                ExternalForceNaturalConvection<T, Q>(imx, imy, imz, _gx, _gy, _gz, _q.f0, _q.f, idx);
                Macro<T, Q>(item, iqx, iqy, iqz, _q.f0, _q.f, idx);

                //  Save macro if need
                if (_issave) {
                    _ip[idx] = ip;
                    _iux[idx] = iux;
                    _iuy[idx] = iuy;
                    _iuz[idx] = iuz;
                    _imx[idx] = imx;
                    _imy[idx] = imy;
                    _imz[idx] = imz;
                    _item[idx] = item;
                    _iqx[idx] = iqx;
                    _iqy[idx] = iqy;
                    _iqz[idx] = iqz;
                    if (_ig) {
                        int offsetf = Q<T>::nc*idx;
                        _ig[offsetf] = _q.f0[idx];
                        for (int c = 1; c < Q<T>::nc; ++c) {
                            _ig[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                        }
                    }
                }

                //  Collide
                ANS::Equilibrium<T, P>(feq, _ux[idx], _uy[idx], _uz[idx], ip, iux, iuy, iuz);
                _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
                for (int c = 1; c < P<T>::nc; ++c) {
                    int idxf = P<T>::IndexF(idx, c);
                    _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
                }
                Equilibrium<T, Q>(geq, item, iqx, iqy, iqz, _ux[idx], _uy[idx], _uz[idx]);
                _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    int idxf = Q<T>::IndexF(idx, c);
                    _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
                }
            }
        }

        //  Function of setting initial condition of AAD for 2D
        //  Seeds the adjoint thermal lattice from given macroscopic fields.
        template<class T, template<class>class Q>
        void InitialCondition(Q<T>& _q, const T *_ux, const T *_uy, const T *_item, const T *_iqx, const T *_iqy) {
            T geq[Q<T>::nc];
            for (int idx = 0; idx < _q.nxyz; ++idx) {
                Equilibrium<T, Q>(geq, _item[idx], _iqx[idx], _iqy[idx], _ux[idx], _uy[idx]);
                _q.f0[idx] = geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _q.f[Q<T>::IndexF(idx, c)] = geq[c];
                }
            }
        }

        //  Function of setting initial condition of AAD for 3D
        template<class T, template<class>class Q>
        void InitialCondition(Q<T>& _q, const T *_ux, const T *_uy, const T *_uz, const T *_item, const T *_iqx, const T *_iqy, const T *_iqz) {
            T geq[Q<T>::nc];
            for (int idx = 0; idx < _q.nxyz; ++idx) {
                Equilibrium<T, Q>(geq, _item[idx], _iqx[idx], _iqy[idx], _iqz[idx], _ux[idx], _uy[idx], _uz[idx]);
                _q.f0[idx] = geq[0];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _q.f[Q<T>::IndexF(idx, c)] = geq[c];
                }
            }
        }

        //  Function of setting boundary condition set iT of AAD for D2Q9
        //  _bctype(x, y) selects which global-coordinate boundary nodes the
        //  adjoint temperature condition applies to; PEx/PEy guards restrict
        //  the work to the MPI sub-domain touching each physical face.
        template<class T, template<class>class Q, class Ff>
        void iBoundaryConditionSetT(Q<T>& _q, const T *_ux, const T *_uy, Ff _bctype) {
            //  On xmin
            if (_q.PEx == 0) {
                for (int j = 0; j < _q.ny; ++j) {
                    if (_bctype(0 + _q.offsetx, j + _q.offsety)) {
                        int idx = _q.Index(0, j);
                        T rho0 = -(4.0*(1.0 + 3.0*_ux[idx])*_q.f[Q<T>::IndexF(idx, 1)] + (1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 5)] + (1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 8)])/(6.0*(1.0 + 3.0*_ux[idx]));
                        _q.f[Q<T>::IndexF(idx, 3)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 6)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                    }
                }
            }
            //  On xmax
            if (_q.PEx == _q.mx - 1) {
                for (int j = 0; j < _q.ny; ++j) {
                    if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) {
                        int idx = _q.Index(_q.nx - 1, j);
                        T rho0 = -(4.0*(1.0 - 3.0*_ux[idx])*_q.f[Q<T>::IndexF(idx, 3)] + (1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 6)] + (1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 7)])/(6.0*(1.0 - 3.0*_ux[idx]));
                        _q.f[Q<T>::IndexF(idx, 1)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 5)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 8)] = rho0;
                    }
                }
            }
            //  On ymin
            if (_q.PEy == 0) {
                for (int i = 0; i < _q.nx; ++i) {
                    if (_bctype(i + _q.offsetx, 0 + _q.offsety)) {
                        int idx = _q.Index(i, 0);
                        T rho0 = -(4.0*(1.0 + 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 2)] + (1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 5)] + (1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 6)])/(6.0*(1.0 + 3.0*_uy[idx]));
                        _q.f[Q<T>::IndexF(idx, 4)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 8)] = rho0;
                    }
                }
            }
            //  On ymax
            if (_q.PEy == _q.my - 1) {
                for (int i = 0; i < _q.nx; ++i) {
                    if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) {
                        int idx = _q.Index(i, _q.ny - 1);
                        T rho0 = -(4.0*(1.0 - 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 4)] + (1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 7)] + (1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])*_q.f[Q<T>::IndexF(idx, 8)])/(6.0*(1.0 - 3.0*_uy[idx]));
                        _q.f[Q<T>::IndexF(idx, 2)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 5)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 6)] = rho0;
                    }
                }
            }
        }

        //  Function of setting boundary condition set iT of AAD for D3Q15
        template<class T, template<class>class Q, class Ff>
        void iBoundaryConditionSetT(Q<T>& _q, const T *_ux, const T *_uy, const T *_uz, Ff _bctype) {
            //  On xmin
            if (_q.PEx == 0) {
                for (int j = 0; j < _q.ny; ++j) {
                    for (int k = 0; k < _q.nz; ++k) {
                        if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                            int idx = _q.Index(0, j, k);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 12)])/12.0
                                -_uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/(4.0*(1.0 + 3.0*_ux[idx]))
                                -_uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/(4.0*(1.0 + 3.0*_ux[idx]));
                            _q.f[Q<T>::IndexF(idx, 4)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 8)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 11)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 13)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 14)] = rho0;
                        }
                    }
                }
            }
            //  On xmax
            if (_q.PEx == _q.mx - 1) {
                for (int j = 0; j < _q.ny; ++j) {
                    for (int k = 0; k < _q.nz; ++k) {
                        if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                            int idx = _q.Index(_q.nx - 1, j, k);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/12.0
                                -_uy[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] - _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 - 3.0*_ux[idx]))
                                -_uz[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 - 3.0*_ux[idx]));
                            _q.f[Q<T>::IndexF(idx, 1)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 9)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 10)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 12)] = rho0;
                        }
                    }
                }
            }
            //  On ymin
            if (_q.PEy == 0) {
                for (int k = 0; k < _q.nz; ++k) {
                    for (int i = 0; i < _q.nx; ++i) {
                        if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) {
                            int idx = _q.Index(i, 0, k);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 13)])/12.0
                                -_uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/(4.0*(1.0 + 3.0*_uy[idx]))
                                -_ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/(4.0*(1.0 + 3.0*_uy[idx]));
                            _q.f[Q<T>::IndexF(idx, 5)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 9)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 11)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 12)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 14)] = rho0;
                        }
                    }
                }
            }
            //  On ymax
            if (_q.PEy == _q.my - 1) {
                for (int k = 0; k < _q.nz; ++k) {
                    for (int i = 0; i < _q.nx; ++i) {
                        if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) {
                            int idx = _q.Index(i, _q.ny - 1, k);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/12.0
                                -_uz[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 - 3.0*_uy[idx]))
                                -_ux[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 - 3.0*_uy[idx]));
                            _q.f[Q<T>::IndexF(idx, 2)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 8)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 10)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 13)] = rho0;
                        }
                    }
                }
            }
            //  On zmin
            if (_q.PEz == 0) {
                for (int i = 0; i < _q.nx; ++i) {
                    for (int j = 0; j < _q.ny; ++j) {
                        if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) {
                            int idx = _q.Index(i, j, 0);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 14)])/12.0
                                -_ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 + 3.0*_uz[idx]))
                                -_uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/(4.0*(1.0 + 3.0*_uz[idx]));
                            _q.f[Q<T>::IndexF(idx, 6)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 10)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 11)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 12)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 13)] = rho0;
                        }
                    }
                }
            }
            //  On zmax
            if (_q.PEz == _q.mz - 1) {
                for (int i = 0; i < _q.nx; ++i) {
                    for (int j = 0; j < _q.ny; ++j) {
                        if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) {
                            int idx = _q.Index(i, j, _q.nz - 1);
                            T rho0 = -(8.0*_q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/12.0
                                -_ux[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 13)])/(4.0*(1.0 - 3.0*_uz[idx]))
                                -_uy[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/(4.0*(1.0 - 3.0*_uz[idx]));
                            _q.f[Q<T>::IndexF(idx, 3)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 8)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 9)] = rho0;
                            _q.f[Q<T>::IndexF(idx, 14)] = rho0;
                        }
                    }
                }
            }
        }

        //  Function of setting boundary condition set iQ of AAD for D2Q9
        //  (definition continues beyond this chunk)
        template<class T, template<class>class Q, class Ff>
        void iBoundaryConditionSetQ(Q<T>& _q, const T *_ux, const T *_uy, Ff _bctype, T _eps = T()) {
            //  On xmin
            if (_q.PEx == 0) {
                for (int j = 0; j < _q.ny; ++j) {
                    if (_bctype(0 + _q.offsetx, j + _q.offsety)) {
                        int idx = _q.Index(0, j);
                        T rho0 = ( (1.0 + 3.0*_ux[idx])*(4.0*_q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 8)]) + 3.0*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 8)]) - 12.0*_eps )/(6.0*(1.0 - 3.0*_ux[idx]));
                        _q.f[Q<T>::IndexF(idx, 3)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 6)] = rho0;
                        _q.f[Q<T>::IndexF(idx, 7)] = rho0;
                    }
                }
            }
            //  On xmax
            if (_q.PEx == _q.mx - 1) {
                for (int j = 0; j < _q.ny; ++j) {
                    if
(_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) { int idx = _q.Index(_q.nx - 1, j); T rho0 = ( (1.0 - 3.0*_ux[idx])*(4.0*_q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 7)]) + 3.0*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)]) - 12.0*_eps )/(6.0*(1.0 + 3.0*_ux[idx])); _q.f[Q<T>::IndexF(idx, 1)] = rho0; _q.f[Q<T>::IndexF(idx, 5)] = rho0; _q.f[Q<T>::IndexF(idx, 8)] = rho0; } } } // On ymin if (_q.PEy == 0) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, 0 + _q.offsety)) { int idx = _q.Index(i, 0); T rho0 = ( (1.0 + 3.0*_uy[idx])*(4.0*_q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 6)]) + 3.0*_ux[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)]) - 12.0*_eps )/(6.0*(1.0 - 3.0*_uy[idx])); _q.f[Q<T>::IndexF(idx, 4)] = rho0; _q.f[Q<T>::IndexF(idx, 7)] = rho0; _q.f[Q<T>::IndexF(idx, 8)] = rho0; } } } // On ymax if (_q.PEy == _q.my - 1) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) { int idx = _q.Index(i, _q.ny - 1); T rho0 = ( (1.0 - 3.0*_uy[idx])*(4.0*_q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)]) + 3.0*_ux[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 7)]) - 12.0*_eps )/(6.0*(1.0 + 3.0*_uy[idx])); _q.f[Q<T>::IndexF(idx, 2)] = rho0; _q.f[Q<T>::IndexF(idx, 5)] = rho0; _q.f[Q<T>::IndexF(idx, 6)] = rho0; } } } } // Function of setting boundary condition set iQ of AAD for D3Q15 template<class T, template<class>class Q, class Ff> void iBoundaryConditionSetQ(Q<T>& _q, const T *_ux, const T *_uy, const T *_uz, Ff _bctype, T _eps = T()) { // On xmin if (_q.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(0, j, k); T rho0 = ( (1.0 + 3.0*_ux[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 
10)] + _q.f[Q<T>::IndexF(idx, 12)]) + 3.0*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)]) + 3.0*_uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)]) - 24.0*_eps )/(12.0*(1.0 - 3.0*_ux[idx])); _q.f[Q<T>::IndexF(idx, 4)] = rho0; _q.f[Q<T>::IndexF(idx, 8)] = rho0; _q.f[Q<T>::IndexF(idx, 11)] = rho0; _q.f[Q<T>::IndexF(idx, 13)] = rho0; _q.f[Q<T>::IndexF(idx, 14)] = rho0; } } } } // On xmax if (_q.PEx == _q.mx - 1) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(_q.nx - 1, j, k); T rho0 = ( (1.0 - 3.0*_ux[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)]) + 3.0*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] - _q.f[Q<T>::IndexF(idx, 14)]) + 3.0*_uz[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)]) - 24.0*_eps )/(12.0*(1.0 + 3.0*_ux[idx])); _q.f[Q<T>::IndexF(idx, 1)] = rho0; _q.f[Q<T>::IndexF(idx, 7)] = rho0; _q.f[Q<T>::IndexF(idx, 9)] = rho0; _q.f[Q<T>::IndexF(idx, 10)] = rho0; _q.f[Q<T>::IndexF(idx, 12)] = rho0; } } } } // On ymin if (_q.PEy == 0) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, 0, k); T rho0 = ( (1.0 + 3.0*_uy[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 13)]) + 3.0*_uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)]) + 3.0*_ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - 
_q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)]) - 24.0*_eps )/(12.0*(1.0 - 3.0*_uy[idx])); _q.f[Q<T>::IndexF(idx, 5)] = rho0; _q.f[Q<T>::IndexF(idx, 9)] = rho0; _q.f[Q<T>::IndexF(idx, 11)] = rho0; _q.f[Q<T>::IndexF(idx, 12)] = rho0; _q.f[Q<T>::IndexF(idx, 14)] = rho0; } } } } // On ymax if (_q.PEy == _q.my - 1) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, _q.ny - 1, k); T rho0 = ( (1.0 - 3.0*_uy[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)]) + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)]) + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 14)]) - 24.0*_eps )/(12.0*(1.0 + 3.0*_uy[idx])); _q.f[Q<T>::IndexF(idx, 2)] = rho0; _q.f[Q<T>::IndexF(idx, 7)] = rho0; _q.f[Q<T>::IndexF(idx, 8)] = rho0; _q.f[Q<T>::IndexF(idx, 10)] = rho0; _q.f[Q<T>::IndexF(idx, 13)] = rho0; } } } } // On zmin if (_q.PEz == 0) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) { int idx = _q.Index(i, j, 0); T rho0 = ( (1.0 + 3.0*_uz[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 14)]) + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)]) + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)]) - 24.0*_eps )/(12.0*(1.0 - 3.0*_uz[idx])); _q.f[Q<T>::IndexF(idx, 6)] = rho0; _q.f[Q<T>::IndexF(idx, 10)] = rho0; _q.f[Q<T>::IndexF(idx, 11)] = rho0; 
_q.f[Q<T>::IndexF(idx, 12)] = rho0; _q.f[Q<T>::IndexF(idx, 13)] = rho0; } } } } // On zmax if (_q.PEz == _q.mz - 1) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) { int idx = _q.Index(i, j, _q.nz - 1); T rho0 = ( (1.0 - 3.0*_uz[idx])*(8.0*_q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)]) + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 13)]) + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)]) - 24.0*_eps )/(12.0*(1.0 + 3.0*_uz[idx])); _q.f[Q<T>::IndexF(idx, 3)] = rho0; _q.f[Q<T>::IndexF(idx, 7)] = rho0; _q.f[Q<T>::IndexF(idx, 8)] = rho0; _q.f[Q<T>::IndexF(idx, 9)] = rho0; _q.f[Q<T>::IndexF(idx, 14)] = rho0; } } } } } // Function of setting boundary condition set iRho and iT or iQ of AAD for D2Q9 template<class T, template<class>class P, template<class>class Q, class Ff> void iBoundaryConditionSetRho(P<T>& _p, Q<T>& _q, const T *_rho, const T *_ux, const T *_uy, const T *_tem, Ff _bctype, T _eps = T()) { // On xmin if (_p.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(0 + _p.offsetx, j + _p.offsety)) { int idx = _q.Index(0, j); T rho0 = -(4.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 8)])/3.0; T flux0 = T(); if (_bctype(0 + _p.offsetx, j + _p.offsety) == SetT) { flux0 = _tem[idx]*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 8)])/(2.0*(1.0 + 3.0*_ux[idx])*_rho[idx]); } else if (_bctype(0 + _p.offsetx, j + _p.offsety) == SetQ) { flux0 = -_tem[idx]*( (4.0*_q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 8)])/3.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 8)])/2.0 )/((1.0 - 3.0*_ux[idx])*_rho[idx]); } T obj0 = 
_eps*2.0*_tem[idx]/((1.0 - 3.0*_ux[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 1)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] + rho0 + flux0 + obj0; } } } // On xmax if (_p.PEx == _p.mx - 1) { for (int j = 0; j < _q.ny; ++j) { if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety)) { int idx = _q.Index(_q.nx - 1, j); T rho0 = -(4.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 7)])/3.0; T flux0 = T(); if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety) == SetT) { flux0 = _tem[idx]*_uy[idx]*(_q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)])/(2.0*(1.0 - 3.0*_ux[idx])*_rho[idx]); } else if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety) == SetQ) { flux0 = -_tem[idx]*( (4.0*_q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 7)])/3.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)])/2.0 )/((1.0 + 3.0*_ux[idx])*_rho[idx]); } T obj0 = _eps*2.0*_tem[idx]/((1.0 + 3.0*_ux[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 3)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + rho0 + flux0 + obj0; } } } // On ymin if (_p.PEy == 0) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _p.offsetx, 0 + _p.offsety)) { int idx = _q.Index(i, 0); T rho0 = -(4.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 6)])/3.0; T flux0 = T(); if (_bctype(i + _p.offsetx, 0 + _p.offsety) == SetT) { flux0 = _tem[idx]*_ux[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)])/(2.0*(1.0 + 3.0*_uy[idx])*_rho[idx]); } else if (_bctype(i + _p.offsetx, 0 + _p.offsety) == SetQ) { flux0 = -_tem[idx]*( (4.0*_q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 6)])/3.0 + 
_ux[idx]*(_q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)])/2.0 )/((1.0 - 3.0*_uy[idx])*_rho[idx]); } T obj0 = _eps*2.0*_tem[idx]/((1.0 - 3.0*_uy[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 2)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 5)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 6)] + rho0 + flux0 + obj0; } } } // On ymax if (_p.PEy == _p.my - 1) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety)) { int idx = _q.Index(i, _q.ny - 1); T rho0 = -(4.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)])/3.0; T flux0 = T(); if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety) == SetT) { flux0 = _tem[idx]*_ux[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 7)])/(2.0*(1.0 - 3.0*_uy[idx])*_rho[idx]); } else if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety) == SetQ) { flux0 = -_tem[idx]*( (4.0*_q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)])/3.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 7)])/2.0 )/((1.0 + 3.0*_uy[idx])*_rho[idx]); } T obj0 = _eps*2.0*_tem[idx]/((1.0 + 3.0*_uy[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 4)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 7)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 8)] + rho0 + flux0 + obj0; } } } } // Function of setting boundary condition set iRho and iT or iQ of AAD for D3Q15 template<class T, template<class>class P, template<class>class Q, class Ff> void iBoundaryConditionSetRho(P<T>& _p, Q<T>& _q, const T *_rho, const T *_ux, const T *_uy, const T *_uz, const T *_tem, Ff _bctypeg, T _eps = T()) { // On xmin if (_p.PEx == 0) { for (int j = 0; j < _p.ny; ++j) { for (int k = 0; k < _p.nz; ++k) { if (_bctype(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz)) { int idx = _p.Index(0, j, k); T rho0 = 
-(8.0*_p.f[P<T>::IndexF(idx, 1)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 12)])/6.0; T flux0 = T(); if (_bctype(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/4.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_ux[idx])); } else if (_bctype(0 + _p.offsetx, j + _p.offsety, k + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 12)])/6.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/4.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/4.0 )/(_rho[idx]*(1.0 - 3.0*_ux[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 - 3.0*_ux[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 4)] = _p.f[P<T>::IndexF(idx, 1)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0 + flux0 + obj0; } } } } // On xmax if (_p.PEx == _p.mx - 1) { for (int j = 0; j < _p.ny; ++j) { for (int k = 0; k < _p.nz; ++k) { if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz)) { int idx = _p.Index(_p.nx - 1, j, k); T rho0 = -(8.0*_p.f[P<T>::IndexF(idx, 4)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 13)] + _p.f[P<T>::IndexF(idx, 14)])/6.0; T flux0 = T(); if 
(_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _uy[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/4.0 )/(_rho[idx]*(1.0 - 3.0*_ux[idx])); } else if (_bctype((_p.nx - 1) + _p.offsetx, j + _p.offsety, k + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/6.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_ux[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 + 3.0*_ux[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 1)] = _p.f[P<T>::IndexF(idx, 4)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0 + flux0 + obj0; } } } } // On ymin if (_p.PEy == 0) { for (int k = 0; k < _p.nz; ++k) { for (int i = 0; i < _p.nx; ++i) { if (_bctype(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz)) { int idx = _p.Index(i, 0, k); T rho0 = -(8.0*_p.f[P<T>::IndexF(idx, 2)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 13)])/6.0; T flux0 = T(); if (_bctype(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 10)] 
- _q.f[Q<T>::IndexF(idx, 13)])/4.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_uy[idx])); } else if (_bctype(i + _p.offsetx, 0 + _p.offsety, k + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 13)])/6.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/4.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/4.0 )/(_rho[idx]*(1.0 - 3.0*_uy[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 - 3.0*_uy[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 5)] = _p.f[P<T>::IndexF(idx, 2)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0 + flux0 + obj0; } } } } // On ymax if (_p.PEy == _p.my - 1) { for (int k = 0; k < _p.nz; ++k) { for (int i = 0; i < _p.nx; ++i) { if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz)) { int idx = _p.Index(i, _p.ny - 1, k); T rho0 = -(8.0*_p.f[P<T>::IndexF(idx, 5)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 14)])/6.0; T flux0 = T(); if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _uz[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 
)/(_rho[idx]*(1.0 - 3.0*_uy[idx])); } else if (_bctype(i + _p.offsetx, (_p.ny - 1) + _p.offsety, k + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/6.0 + _uz[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_uy[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 + 3.0*_uy[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 2)] = _p.f[P<T>::IndexF(idx, 5)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0 + flux0 + obj0; } } } } // On zmin if (_p.PEz == 0) { for (int i = 0; i < _p.nx; ++i) { for (int j = 0; j < _p.ny; ++j) { if (_bctype(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz)) { int idx = _p.Index(i, j, 0); T rho0 = -(8.0*_p.f[P<T>::IndexF(idx, 3)] + _p.f[P<T>::IndexF(idx, 7)] + _p.f[P<T>::IndexF(idx, 8)] + _p.f[P<T>::IndexF(idx, 9)] + _p.f[P<T>::IndexF(idx, 14)])/6.0; T flux0 = T(); if (_bctype(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_uz[idx])); } else if (_bctype(i + _p.offsetx, j + _p.offsety, 0 + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 7)] + 
_q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 14)])/6.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/4.0 )/(_rho[idx]*(1.0 - 3.0*_uz[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 - 3.0*_uz[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 6)] = _p.f[P<T>::IndexF(idx, 3)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 10)] = _p.f[P<T>::IndexF(idx, 14)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 11)] = _p.f[P<T>::IndexF(idx, 7)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 12)] = _p.f[P<T>::IndexF(idx, 8)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 13)] = _p.f[P<T>::IndexF(idx, 9)] + rho0 + flux0 + obj0; } } } } // On zmax if (_p.PEz == _p.mz - 1) { for (int i = 0; i < _p.nx; ++i) { for (int j = 0; j < _p.ny; ++j) { if (_bctype(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz)) { int idx = _p.Index(i, j, _p.nz - 1); T rho0 = -(8.0*_p.f[P<T>::IndexF(idx, 6)] + _p.f[P<T>::IndexF(idx, 10)] + _p.f[P<T>::IndexF(idx, 11)] + _p.f[P<T>::IndexF(idx, 12)] + _p.f[P<T>::IndexF(idx, 13)])/6.0; T flux0 = T(); if (_bctype(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz) == SetT) { flux0 = _tem[idx]*( _ux[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 13)])/4.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/4.0 )/(_rho[idx]*(1.0 - 3.0*_uz[idx])); } else if (_bctype(i + _p.offsetx, j + _p.offsety, (_p.nz - 1) + _p.offsetz) == SetQ) { flux0 = -_tem[idx]*( (8.0*_q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/6.0 + _ux[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 
11)] + _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 13)])/4.0 + _uy[idx]*(_q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/4.0 )/(_rho[idx]*(1.0 + 3.0*_uz[idx])); } T obj0 = _eps*2.0*_tem[idx]/((1.0 + 3.0*_uz[idx])*_rho[idx]); _p.f[P<T>::IndexF(idx, 3)] = _p.f[P<T>::IndexF(idx, 6)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 7)] = _p.f[P<T>::IndexF(idx, 11)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 8)] = _p.f[P<T>::IndexF(idx, 12)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 9)] = _p.f[P<T>::IndexF(idx, 13)] + rho0 + flux0 + obj0; _p.f[P<T>::IndexF(idx, 14)] = _p.f[P<T>::IndexF(idx, 10)] + rho0 + flux0 + obj0; } } } } } // Function of getting sensitivity of temperature at heat source for D2Q9 template<class T, template<class>class Q, class Fv, class Ff> void SensitivityTemperatureAtHeatSource( const T *_ux, const T *_uy, const T *_imx, const T *_imy, Q<T>& _q, const T *_tem, const T *_item, const T *_iqx, const T *_iqy, const T *_g, const T *_ig, T *_dfds, const T *_diffusivity, const T *_dads, const T *_dkds, Fv _qnbc, Ff _bctype ) { // Brinkman term and diffusivity term for (int idx = 0; idx < _q.nxyz; ++idx) { _dfds[idx] += 3.0*_dads[idx]*(_ux[idx]*_imx[idx] + _uy[idx]*_imy[idx]); int offsetf = Q<T>::nc*idx; T sumg = T(); for (int c = 0; c < Q<T>::nc; ++c) { sumg += _g[offsetf + c]*_ig[offsetf + c]; } _dfds[idx] += -3.0/pow(3.0*_diffusivity[idx] + 0.5, 2.0)*_dkds[idx]*(sumg - _tem[idx]*(_item[idx] + 3.0*(_ux[idx]*_iqx[idx] + _uy[idx]*_iqy[idx]))); } // Boundary term along xmin if (_q.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(0 + _q.offsetx, j + _q.offsety)) { int idx = _q.Index(0, j), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(0 + _q.offsetx, j + _q.offsety)*_dkds[idx]*( (1.0 + 3.0*_ux[idx])*(-6.0 + 4.0*_ig[offsetf + 1] + _ig[offsetf + 5] + _ig[offsetf + 8]) + 3.0*_uy[idx]*(_ig[offsetf + 5] - _ig[offsetf + 8]) )/(36.0*(1.0 - 
3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along xmax if (_q.PEx == _q.mx - 1) { for (int j = 0; j < _q.ny; ++j) { if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) { int idx = _q.Index(_q.nx - 1, j), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety)*_dkds[idx]*( (1.0 - 3.0*_ux[idx])*(-6.0 + 4.0*_ig[offsetf + 3] + _ig[offsetf + 6] + _ig[offsetf + 7]) + 3.0*_uy[idx]*(_ig[offsetf + 6] - _ig[offsetf + 7]) )/(36.0*(1.0 + 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along ymin if (_q.PEy == 0) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, 0 + _q.offsety)) { int idx = _q.Index(i, 0), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, 0 + _q.offsety)*_dkds[idx]*( (1.0 + 3.0*_uy[idx])*(-6.0 + 4.0*_ig[offsetf + 2] + _ig[offsetf + 5] + _ig[offsetf + 6]) + 3.0*_ux[idx]*(_ig[offsetf + 5] - _ig[offsetf + 6]) )/(36.0*(1.0 - 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } // Boundary term along ymax if (_q.PEy == _q.my - 1) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) { int idx = _q.Index(i, _q.ny - 1), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety)*_dkds[idx]*( (1.0 - 3.0*_uy[idx])*(-6.0 + 4.0*_ig[offsetf + 4] + _ig[offsetf + 7] + _ig[offsetf + 8]) + 3.0*_ux[idx]*(_ig[offsetf + 8] - _ig[offsetf + 7]) )/(36.0*(1.0 + 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Function of getting sensitivity of temperature at heat source for D3Q15 template<class T, template<class>class Q, class Fv, class Ff> void SensitivityTemperatureAtHeatSource( const T *_ux, const T *_uy, const T *_uz, const T *_imx, const T *_imy, const T *_imz, Q<T>& _q, const T *_tem, const T *_item, const T *_iqx, const T *_iqy, const T *_iqz, const T *_g0, const T *_g, const T *_ig0, const T *_ig, T *_dfds, const T *_diffusivity, const T *_dads, const T *_dkds, Fv _qnbc, Ff _bctype ) { // Brinkman term and 
diffusivity term for (int idx = 0; idx < _q.nxyz; ++idx) { _dfds[idx] += 3.0*_dads[idx]*(_ux[idx]*_imx[idx] + _uy[idx]*_imy[idx] + _uz[idx]*_imz[idx]); int offsetf = Q<T>::nc*idx; T sumg = T(); for (int c = 0; c < Q<T>::nc; ++c) { sumg += _g[offsetf + c]*_ig[offsetf + c]; } _dfds[idx] += -3.0/pow(3.0*_diffusivity[idx] + 0.5, 2.0)*_dkds[idx]*(sumg - _tem[idx]*(_item[idx] + 3.0*(_ux[idx]*_iqx[idx] + _uy[idx]*_iqy[idx] + _uz[idx]*_iqz[idx]))); } // Boundary term along xmin if (_q.PEx == 0) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(0, j, k), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_ux[idx])*(-12.0 + 8.0*_ig[offsetf + 1] + _ig[offsetf + 7] + _ig[offsetf + 9] + _ig[offsetf + 10] + _ig[offsetf + 12]) + 3.0*_uy[idx]*(_ig[offsetf + 7] - _ig[offsetf + 9] + _ig[offsetf + 10] - _ig[offsetf + 12]) + 3.0*_uz[idx]*(_ig[offsetf + 7] + _ig[offsetf + 9] - _ig[offsetf + 10] - _ig[offsetf + 12]) )/(72.0*(1.0 - 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along xmax if (_q.PEx == _q.mx - 1) { for (int j = 0; j < _q.ny; ++j) { for (int k = 0; k < _q.nz; ++k) { if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(_q.nx - 1, j, k), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_ux[idx])*(-12.0 + 8.0*_ig[offsetf + 4] + _ig[offsetf + 8] + _ig[offsetf + 11] + _ig[offsetf + 13] + _ig[offsetf + 14]) + 3.0*_uy[idx]*(_ig[offsetf + 8] - _ig[offsetf + 11] + _ig[offsetf + 13] - _ig[offsetf + 14]) + 3.0*_uz[idx]*(_ig[offsetf + 8] - _ig[offsetf + 11] - _ig[offsetf + 13] + _ig[offsetf + 14]) )/(72.0*(1.0 + 3.0*_ux[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along ymin if (_q.PEy == 0) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if 
(_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, 0, k), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_uy[idx])*(-12.0 + 8.0*_ig[offsetf + 2] + _ig[offsetf + 7] + _ig[offsetf + 8] + _ig[offsetf + 10] + _ig[offsetf + 13]) + 3.0*_uz[idx]*(_ig[offsetf + 7] + _ig[offsetf + 8] - _ig[offsetf + 10] - _ig[offsetf + 13]) + 3.0*_ux[idx]*(_ig[offsetf + 7] - _ig[offsetf + 8] + _ig[offsetf + 10] - _ig[offsetf + 13]) )/(72.0*(1.0 - 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along ymax if (_q.PEy == _q.my - 1) { for (int k = 0; k < _q.nz; ++k) { for (int i = 0; i < _q.nx; ++i) { if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) { int idx = _q.Index(i, _q.ny - 1, k), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_uy[idx])*(-12.0 + 8.0*_ig[offsetf + 5] + _ig[offsetf + 9] + _ig[offsetf + 11] + _ig[offsetf + 12] + _ig[offsetf + 14]) + _uz[idx]*(_ig[offsetf + 9] - _ig[offsetf + 11] - _ig[offsetf + 12] + _ig[offsetf + 14]) + _ux[idx]*(_ig[offsetf + 9] - _ig[offsetf + 11] + _ig[offsetf + 12] - _ig[offsetf + 14]) )/(72.0*(1.0 + 3.0*_uy[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary term along zmin if (_q.PEz == 0) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) { int idx = _q.Index(i, j, 0), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)*_dkds[idx]*( (1.0 + 3.0*_uz[idx])*(-12.0 + 8.0*_ig[offsetf + 3] + _ig[offsetf + 7] + _ig[offsetf + 8] + _ig[offsetf + 9] + _ig[offsetf + 14]) + _ux[idx]*(_ig[offsetf + 7] - _ig[offsetf + 8] + _ig[offsetf + 9] - _ig[offsetf + 14]) + _uy[idx]*(_ig[offsetf + 7] + _ig[offsetf + 8] - _ig[offsetf + 9] - _ig[offsetf + 14]) )/(72.0*(1.0 - 3.0*_uz[idx])*pow(_diffusivity[idx], 2.0)); } } } } // Boundary 
term along zmax if (_q.PEz == _q.mz - 1) { for (int i = 0; i < _q.nx; ++i) { for (int j = 0; j < _q.ny; ++j) { if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) { int idx = _q.Index(i, j, _q.nz - 1), offsetf = Q<T>::nc*idx; _dfds[idx] += _qnbc(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)*_dkds[idx]*( (1.0 - 3.0*_uz[idx])*(-12.0 + 8.0*_ig[offsetf + 6] + _ig[offsetf + 10] + _ig[offsetf + 11] + _ig[offsetf + 12] + _ig[offsetf + 13]) + _ux[idx]*(_ig[offsetf + 10] - _ig[offsetf + 11] + _ig[offsetf + 12] - _ig[offsetf + 13]) + _uy[idx]*(_ig[offsetf + 10] - _ig[offsetf + 11] - _ig[offsetf + 12] + _ig[offsetf + 13]) )/(72.0*(1.0 + 3.0*_uz[idx])*pow(_diffusivity[idx], 2.0)); } } } } } } }
/* ===== Begin concatenated file: GB_binop__minus_fp64.c ===== */
// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernel file (see notice
// below).  Do not hand-edit; change Generator/* and regenerate instead.
// Review comments added only; code tokens unchanged.

//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_fp64)
// A*D function (colscale):         GB (_AxD__minus_fp64)
// D*A function (rowscale):         GB (_DxB__minus_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_fp64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_fp64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_fp64)
// C=scalar+B                       GB (_bind1st__minus_fp64)
// C=scalar+B'                      GB (_bind1st_tran__minus_fp64)
// C=A+scalar                       GB (_bind2nd__minus_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__minus_fp64)

// C type:   double
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_FP64 || GxB_NO_MINUS_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the following return is unreachable (the block above
    // always returns); harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
jan_example.c
/*
 * Four-point Jacobi-style stencil written twice: a serial reference
 * (OperatorSerial) and an OpenMP version (OperatorParall).  main() runs both
 * on identical zero-initialized inputs and asserts elementwise equality.
 * Under -D_CIVL the 3-D view of the flat buffer is emulated with a table of
 * row pointers (CIVL cannot cast a flat buffer to an array-of-arrays type).
 *
 * NOTE(review): despite the .c extension, the non-CIVL branch includes C++
 * headers (<cassert>, <iostream>, ...), so that configuration must be
 * compiled as C++.
 */
#ifndef _CIVL
#include <cassert>
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <fstream>
#include <vector>
#include <cstdio>
#include <string>
#include <inttypes.h>
#include <sys/time.h>
#include <math.h>
#include <omp.h>
#endif

#ifdef _CIVL
#include <civlc.cvh>
#include <stdio.h>
#include <assert.h>
#include <omp.h>
#endif

const int sz = 8;    // spatial extent in each of the two space dimensions
const int st = 10;   // number of time steps

#ifdef _CIVL
$input float uvecin[sz*st*sz];   // symbolic input for CIVL verification
#else
float uvecin[st*sz*sz];
#endif

/* Serial reference: double-buffers between planes u[t0] (read) and u[t1]
 * (write), alternating with the parity of the time index i3. */
int OperatorSerial(float *u_vec)
{
#ifdef _CIVL
  // Pointer table emulating u[plane][row][col] over the flat buffer.
  float *u[st][sz];
  for(int i3=0; i3<st; i3++)
  {
    for(int i2=0; i2<sz; i2++)
    {
      u[i3][i2] = &u_vec[i3*sz*sz+i2*sz];
    }
  }
#else
  float (*u)[sz][sz] = (float (*)[sz][sz]) u_vec;
#endif
  {
    int t0;
    int t1;
    for (int i3 = 0; i3<st; i3+=1)
    {
      {
        // t0 = source plane, t1 = destination plane (ping-pong on parity).
        t0 = (i3)%(2);
        t1 = (t0 + 1)%(2);
      }
      {
        // Interior points only; boundary row/column stay untouched.
        for (int i1 = 1; i1<sz-1; i1++)
        {
          #pragma GCC ivdep
          for (int i2 = 1; i2<sz-1; i2++)
          {
            u[t1][i1][i2] = 2.5e-1F*u[t0][i1][i2 - 1]
                          + 2.5e-1F*u[t0][i1][i2 + 1]
                          + 2.5e-1F*u[t0][i1 - 1][i2]
                          + 2.5e-1F*u[t0][i1 + 1][i2];
          }
        }
      }
    }
  }
  return 0;
}

/* OpenMP variant of OperatorSerial.  t0/t1 are shared; the single writer in
 * `omp single` is separated from the readers by the implicit barrier at the
 * end of `single`, and the implicit barrier at the end of the worksharing
 * `for` orders successive time steps — presumably race-free, which is
 * exactly what the CIVL/assert harness below is meant to confirm. */
int OperatorParall(float *u_vec)
{
#ifdef _CIVL
  float *u[st][sz];
  for(int i3=0; i3<st; i3++)
  {
    for(int i2=0; i2<sz; i2++)
    {
      u[i3][i2] = &u_vec[i3*sz*sz+i2*sz];
    }
  }
#else
  float (*u)[sz][sz] = (float (*)[sz][sz]) u_vec;
#endif
  {
    int t0;
    int t1;
    #pragma omp parallel
    for (int i3 = 0; i3<st; i3+=1)
    {
      #pragma omp single
      {
        t0 = (i3)%(2);
        t1 = (t0 + 1)%(2);
      }
      {
        #pragma omp for schedule(static)
        for (int i1 = 1; i1<sz-1; i1++)
        {
          //#pragma omp simd aligned(u:64)
          for (int i2 = 1; i2<sz-1; i2++)
          {
            u[t1][i1][i2] = 2.5e-1F*u[t0][i1][i2 - 1]
                          + 2.5e-1F*u[t0][i1][i2 + 1]
                          + 2.5e-1F*u[t0][i1 - 1][i2]
                          + 2.5e-1F*u[t0][i1 + 1][i2];
          }
        }
      }
    }
  }
  return 0;
}

/* Driver: run both operators on copies of the same input and assert that the
 * parallel result matches the serial reference element by element. */
int main(int argc, char** argv)
{
  printf("alive A\n");
  float uvecoutserial[st*sz*sz];
  float uvecoutparall[st*sz*sz];
  printf("alive B\n");
#ifndef _CIVL
  // Outside CIVL the input is concrete: all zeros.
  for (int i = 0; i<st*sz*sz; i++)
  {
    uvecin[i] = 0.0;
  }
#endif
  printf("alive C\n");
  for (int i = 0; i<st*sz*sz; i++)
  {
    uvecoutserial[i] = uvecin[i];
    uvecoutparall[i] = uvecin[i];
  }
  printf("alive D\n");
  OperatorSerial(&uvecoutserial[0]);
  OperatorParall(&uvecoutparall[0]);
  printf("alive E\n");
  for (int i = 0; i<st*sz*sz; i++)
  {
    printf("%f %f \n",uvecoutserial[i], uvecoutparall[i]);
    assert(uvecoutserial[i] == uvecoutparall[i]);
  }
  printf("alive F\n");
  return 0;
}
volumeramprecision.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2013-2017 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #ifndef IVW_VOLUMERAMPRECISION_H #define IVW_VOLUMERAMPRECISION_H #include <inviwo/core/datastructures/volume/volumeram.h> #include <inviwo/core/datastructures/volume/volumeramhistogram.h> #include <inviwo/core/util/glm.h> #include <inviwo/core/util/stdextensions.h> #include <inviwo/core/datastructures/volume/volume.h> namespace inviwo { /** * \ingroup datastructures */ template <typename T> class VolumeRAMPrecision : public VolumeRAM { public: using type = T; VolumeRAMPrecision(size3_t dimensions = size3_t(128, 128, 128)); VolumeRAMPrecision(T* data, size3_t dimensions = size3_t(128, 128, 128)); VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs); VolumeRAMPrecision<T>& operator=(const VolumeRAMPrecision<T>& that); virtual VolumeRAMPrecision<T>* clone() const override; virtual ~VolumeRAMPrecision(); T* getDataTyped(); const T* getDataTyped() const; virtual void* getData() override; virtual const void* getData() const override; virtual void* getData(size_t) override; virtual const void* getData(size_t) const override; virtual void setData(void* data, size3_t dimensions) override; virtual void removeDataOwnership() override; virtual const size3_t& getDimensions() const override; virtual void setDimensions(size3_t dimensions) override; virtual bool hasHistograms() const override; virtual HistogramContainer* getHistograms(size_t bins = 2048u, size3_t sampleRate = size3_t(1)) override; virtual const HistogramContainer* getHistograms(size_t bins = 2048u, size3_t sampleRate = size3_t(1)) const override; virtual void calculateHistograms(size_t bins, size3_t sampleRate, const bool& stop) const override; virtual double getAsDouble(const size3_t& pos) const override; virtual dvec2 getAsDVec2(const size3_t& pos) const override; virtual dvec3 getAsDVec3(const size3_t& pos) const override; virtual dvec4 getAsDVec4(const size3_t& pos) const override; virtual void setFromDouble(const size3_t& 
pos, double val) override; virtual void setFromDVec2(const size3_t& pos, dvec2 val) override; virtual void setFromDVec3(const size3_t& pos, dvec3 val) override; virtual void setFromDVec4(const size3_t& pos, dvec4 val) override; virtual double getAsNormalizedDouble(const size3_t& pos) const override; virtual dvec2 getAsNormalizedDVec2(const size3_t& pos) const override; virtual dvec3 getAsNormalizedDVec3(const size3_t& pos) const override; virtual dvec4 getAsNormalizedDVec4(const size3_t& pos) const override; virtual void setFromNormalizedDouble(const size3_t& pos, double val) override; virtual void setFromNormalizedDVec2(const size3_t& pos, dvec2 val) override; virtual void setFromNormalizedDVec3(const size3_t& pos, dvec3 val) override; virtual void setFromNormalizedDVec4(const size3_t& pos, dvec4 val) override; void setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize, const size3_t& subOffset) override; virtual size_t getNumberOfBytes() const override; private: size3_t dimensions_; bool ownsDataPtr_; std::unique_ptr<T[]> data_; mutable HistogramContainer histCont_; }; /** * Factory for volumes. * Creates an VolumeRAM with data type specified by format. * * @param dimensions of volume to create. * @param format of volume to create. * @param dataPtr optional pointer to data to be handed into the volume. * @return nullptr if no valid format was specified. */ IVW_CORE_API std::shared_ptr<VolumeRAM> createVolumeRAM(const size3_t& dimensions, const DataFormatBase* format, void* dataPtr = nullptr); template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(size3_t dimensions) : VolumeRAM(DataFormat<T>::get()) , dimensions_(dimensions) , ownsDataPtr_(true) , data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {} template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(T* data, size3_t dimensions) : VolumeRAM(DataFormat<T>::get()) , dimensions_(dimensions) , ownsDataPtr_(true) , data_(data ? 
data : new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {} template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs) : VolumeRAM(rhs) , dimensions_(rhs.dimensions_) , ownsDataPtr_(true) , data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]) { std::memcpy(data_.get(), rhs.data_.get(), dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T)); } template <typename T> VolumeRAMPrecision<T>& VolumeRAMPrecision<T>::operator=(const VolumeRAMPrecision<T>& that) { if (this != &that) { VolumeRAM::operator=(that); auto dim = that.dimensions_; auto data = util::make_unique<T[]>(dim.x * dim.y * dim.z); std::memcpy(data.get(), that.data_.get(), dim.x * dim.y * dim.z * sizeof(T)); data_.swap(data); std::swap(dim, dimensions_); ownsDataPtr_ = true; } return *this; } template <typename T> VolumeRAMPrecision<T>::~VolumeRAMPrecision() { if (!ownsDataPtr_) data_.release(); } template <typename T> VolumeRAMPrecision<T>* VolumeRAMPrecision<T>::clone() const { return new VolumeRAMPrecision<T>(*this); } template <typename T> const T* inviwo::VolumeRAMPrecision<T>::getDataTyped() const { return data_.get(); } template <typename T> T* inviwo::VolumeRAMPrecision<T>::getDataTyped() { return data_.get(); } template <typename T> void* VolumeRAMPrecision<T>::getData() { return data_.get(); } template <typename T> const void* VolumeRAMPrecision<T>::getData() const { return const_cast<const T*>(data_.get()); } template <typename T> void* VolumeRAMPrecision<T>::getData(size_t pos) { return data_.get() + pos; } template <typename T> const void* VolumeRAMPrecision<T>::getData(size_t pos) const { return const_cast<const T*>(data_.get()) + pos; } template <typename T> void VolumeRAMPrecision<T>::setData(void* d, size3_t dimensions) { std::unique_ptr<T[]> data(static_cast<T*>(d)); data_.swap(data); std::swap(dimensions_, dimensions); if (!ownsDataPtr_) data.release(); ownsDataPtr_ = true; } template <typename T> void 
VolumeRAMPrecision<T>::removeDataOwnership() { ownsDataPtr_ = false; } template <typename T> const size3_t& VolumeRAMPrecision<T>::getDimensions() const { return dimensions_; } template <typename T> size_t VolumeRAMPrecision<T>::getNumberOfBytes() const { return dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T); } template <typename T> void VolumeRAMPrecision<T>::setDimensions(size3_t dimensions) { auto data = util::make_unique<T[]>(dimensions.x * dimensions.y * dimensions.z); data_.swap(data); dimensions_ = dimensions; if (!ownsDataPtr_) data.release(); ownsDataPtr_ = true; } template <typename T> double VolumeRAMPrecision<T>::getAsDouble(const size3_t& pos) const { return util::glm_convert<double>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec2 VolumeRAMPrecision<T>::getAsDVec2(const size3_t& pos) const { return util::glm_convert<dvec2>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec3 VolumeRAMPrecision<T>::getAsDVec3(const size3_t& pos) const { return util::glm_convert<dvec3>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec4 VolumeRAMPrecision<T>::getAsDVec4(const size3_t& pos) const { return util::glm_convert<dvec4>(data_[posToIndex(pos, dimensions_)]); } template <typename T> void VolumeRAMPrecision<T>::setFromDouble(const size3_t& pos, double val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromDVec2(const size3_t& pos, dvec2 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromDVec3(const size3_t& pos, dvec3 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromDVec4(const size3_t& pos, dvec4 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> double VolumeRAMPrecision<T>::getAsNormalizedDouble(const size3_t& pos) const { 
return util::glm_convert_normalized<double>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec2 VolumeRAMPrecision<T>::getAsNormalizedDVec2(const size3_t& pos) const { return util::glm_convert_normalized<dvec2>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec3 VolumeRAMPrecision<T>::getAsNormalizedDVec3(const size3_t& pos) const { return util::glm_convert_normalized<dvec3>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec4 VolumeRAMPrecision<T>::getAsNormalizedDVec4(const size3_t& pos) const { return util::glm_convert_normalized<dvec4>(data_[posToIndex(pos, dimensions_)]); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDouble(const size3_t& pos, double val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec2(const size3_t& pos, dvec2 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec3(const size3_t& pos, dvec3 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec4(const size3_t& pos, dvec4 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize, const size3_t& subOffset) { const T* srcData = reinterpret_cast<const T*>(src->getData()); size_t initialStartPos = (dstOffset.z * (dimensions_.x * dimensions_.y)) + (dstOffset.y * dimensions_.x) + dstOffset.x; size3_t srcDims = src->getDimensions(); size_t dataSize = subSize.x * getDataFormat()->getSize(); size_t volumePos; size_t subVolumePos; ivec3 subSizeI = ivec3(subSize); #pragma omp parallel for for (int zy = 0; zy < subSizeI.z * subSizeI.y; ++zy) { int z = zy / subSizeI.y; 
int y = zy % subSizeI.y; volumePos = (y * dimensions_.x) + (z * dimensions_.x * dimensions_.y); subVolumePos = ((y + subOffset.y) * srcDims.x) + ((z + subOffset.z) * srcDims.x * srcDims.y) + subOffset.x; std::memcpy((data_.get() + volumePos + initialStartPos), (srcData + subVolumePos), dataSize); } } template <typename T> const HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) const { if (!hasHistograms()) { bool stop = false; calculateHistograms(bins, sampleRate, stop); } return &histCont_; } template <typename T> HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) { if (!hasHistograms()) { bool stop = false; calculateHistograms(bins, sampleRate, stop); } return &histCont_; } template <typename T> void VolumeRAMPrecision<T>::calculateHistograms(size_t bins, size3_t sampleRate, const bool& stop) const { if (const auto volume = getOwner()) { dvec2 dataRange = volume->dataMap_.dataRange; histCont_ = util::calculateVolumeHistogram(data_.get(), dimensions_, dataRange, stop, bins, sampleRate); } } template <typename T> bool VolumeRAMPrecision<T>::hasHistograms() const { return !histCont_.empty() && histCont_.isValid(); } } // namespace #endif // IVW_VOLUMERAMPRECISION_H
DRB092-threadprivatemissing2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A file-scope variable used within a function called by a parallel region. No threadprivate is used to avoid data races. This is the case for a variable referenced within a construct. Data race pairs sum0@68:7 vs. sum0@68:12 sum0@68:7 vs. sum0@68:7 */ #include <stdio.h> #include <assert.h> int sum0=0, sum1=0; //#pragma omp threadprivate(sum0) int main() { int i, sum=0; { #pragma omp parallel for private(i ) reduction(+:sum0) for (i=1;i<=1000;i++) { sum0=sum0+i; } } sum= sum+sum0; /* reference calculation */ #pragma omp parallel for private(i ) reduction(+:sum1) for (i=1;i<=1000;i++) { sum1=sum1+i; } printf("sum=%d; sum1=%d\n",sum,sum1); // assert(sum==sum1); return 0; }
system_energy.c
// for license information, see the accompanying LICENSE file /* Functions to handle the computation of the energy */ #include <stdio.h> #include <stdlib.h> #include <complex.h> #include <assert.h> #include <math.h> #include <mpi.h> #include "vars.h" #include "tdslda_func.h" double deform( double * rho , int nxyz , Lattice_arrays * latt_coords , double dxyz ); // filter the density into left and right fragments void makeFragment(double * dens, double *densf,double *theta,int n){ int i; for(i=0;i<n;i++){ densf[i]=dens[i]*theta[i%n]; } return; } // calculate the coulomb energy between two fragments double coul_frag( double * rho , double * xa , double * ya , double * za , int nxyz , double dxyz,double z0 ){ int i,j; double r; double sum=0.; #pragma omp parallel for private(i,j) reduction(sum) for(i=0;i<nxyz;i++){ if(za[i]>=z0)continue; for(j=0;j<nxyz;j++){ if(za[j]<=z0)continue; sum+=rho[i]*rho[j]/sqrt((xa[i]-xa[j])*(xa[i]-xa[j])+(ya[i]-ya[j])*(ya[i]-ya[j])+(za[i]-za[j])*(za[i]-za[j])); } } double e2 = 197.3269631 / 137.035999679 ; return( sum*e2*dxyz*dxyz ); } double system_energy( Couplings * cc_edf , Densities * dens , Densities * dens_p , Densities * dens_n , const int isospin , const int nxyz , double complex * delta , double * chi, const int ip , const int root_p , const int root_n , const MPI_Comm comm , const double hbar2m , const double dxyz , Lattice_arrays * latt_coords , FFtransf_vars * fftransf_vars , MPI_Status * status , const double time , FILE * fd ) { const double mass_p = 938.272013 ; const double mass_n = 939.565346 ; double mass=.5*(mass_p+mass_n); const double hbarc = 197.3269631 ; // EXCOUL double xpow=1./3.; double e2 = -197.3269631*pow(3./acos(-1.),xpow) / 137.035999679 ; xpow*=4.; e2*=(3./2.); ///// static double egs; static int compute_gs =0 ; double e_tot , e_pair , e_rho, e_rhotau, e_so , e_laprho , e_kin, e_cm ; double e_flow_n, e_flow_p , e_ext; int ixyz , ix , iy , iz , i; double e_pair_n , e_kin_n , e_j , tmp , e_coul , n_part ; 
double pair_gap, pair_gap_n; double * rho_0 , * rho_1 , * lap_rho_0 , * lap_rho_1 , * vcoul ; double xcm , ycm , zcm , xcm_p , ycm_p , zcm_p , xcm_n , ycm_n , zcm_n ; double vx, vy, vz; int tag1 = 10 , tag2 = 100 , tag3 = 1000, tag4 = 10000 , tag5 = 100000, tag6 = 100001 ; double num_part , num_n , q20, q30, q40, q22; double xa, ya, za, ra; double beta; assert( rho_0 = malloc( nxyz * sizeof( double ) ) ) ; assert( rho_1 = malloc( nxyz * sizeof( double ) ) ) ; assert( lap_rho_0 = malloc( nxyz * sizeof( double ) ) ) ; assert( lap_rho_1 = malloc( nxyz * sizeof( double ) ) ) ; if( isospin == 1 ) { assert( vcoul = malloc( nxyz * sizeof( double ) ) ) ; coul_pot3( vcoul , dens->rho , rho_0 , rho_1 , latt_coords , nxyz , fftransf_vars , dxyz ) ; } for( ixyz = 0 ; ixyz < nxyz ; ixyz++ ) { rho_0[ ixyz ] = dens_p->rho[ixyz] + dens_n->rho[ixyz] ; rho_1[ixyz] = dens_n->rho[ixyz] - dens_p->rho[ixyz] ; } center_dist( rho_0 , nxyz , latt_coords , &xcm , &ycm , &zcm ) ; laplacean( rho_0 , lap_rho_0 , nxyz , fftransf_vars , latt_coords ) ; laplacean( rho_1 , lap_rho_1 , nxyz , fftransf_vars , latt_coords ) ; e_kin = 0. ; e_rho = 0. ; e_rhotau = 0. ; e_laprho = 0. ; e_so = 0. ; e_j = 0. ; e_pair = 0. ; e_coul = 0. ; num_part = 0. ; num_n = 0. ; //e_flow = 0.; e_flow_p = 0.; e_flow_n = 0.; e_ext = 0.; pair_gap = 0. ; q20 = 0. ; q22 = 0. ; q30 = 0. ; q40 = 0. ; vx = 0.; vy = 0.; vz = 0.; for( ixyz = 0 ; ixyz < nxyz ; ixyz++ ) { num_part += dens->rho[ ixyz ] ; num_n += dens_n->rho[ ixyz ] ; xa = latt_coords->xa[ ixyz ]-xcm ; ya = latt_coords->ya[ ixyz ]-ycm ; za = latt_coords->za[ ixyz ]-zcm ; ra = sqrt(xa*xa + ya*ya + za*za); q20 += rho_0[ ixyz ] * (3*za*za - ra*ra); q30 += rho_0[ ixyz ] * za * ( 5*za*za - 3*ra*ra);; q40 += rho_0[ ixyz ] * (35.*pow(za, 4.0) - 30.*pow(za,2.0)*pow(ra,2.0) + 3.*pow(ra, 4.0)); // if( fabs( latt->xa[ i ] ) > . e_kin += *( dens->tau + ixyz ) ; if(cc_edf->Skyrme){ e_rho += ( cc_edf->c_rho_0 * pow( *( rho_0 + ixyz ) , 2. 
) ) + ( cc_edf->c_rho_1 * pow( *( rho_1 + ixyz ) , 2. ) ) + cc_edf->c_gamma_0 * pow( *( rho_0 + ixyz ) , cc_edf->gamma + 2. ) + cc_edf->c_gamma_1 * pow( *( rho_0 + ixyz ) , cc_edf->gamma ) * pow( *( rho_1 + ixyz ) , 2. ); } else{ e_rho += cc_edf->c_rho_a0 * pow( *(rho_0 + ixyz), 5./3. ) + cc_edf->c_rho_b0 * pow( *(rho_0 + ixyz), 2. ) + cc_edf->c_rho_c0 * pow( *(rho_0 + ixyz), 7./3. ) + cc_edf->c_rho_a1 * pow( *(rho_1 + ixyz), 2.) / (pow( *(rho_0 + ixyz), 1./3. ) + 1e-14) + cc_edf->c_rho_b1 * pow( *(rho_1 + ixyz), 2.) + cc_edf->c_rho_c1 * pow( *(rho_1 + ixyz), 2.) * pow( *(rho_0 + ixyz), 1./3. ) + cc_edf->c_rho_a2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 7./3. ) + 1e-14) + cc_edf->c_rho_b2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 2. ) + 1e-14) + cc_edf->c_rho_c2 * pow( *(rho_1 + ixyz), 4.) / (pow( *(rho_0 + ixyz), 5./3. ) + 1e-14); } e_rhotau += ( cc_edf->c_tau_0 * ( *( dens_p->tau + ixyz ) + *( dens_n->tau + ixyz ) ) * *( rho_0 + ixyz ) + cc_edf->c_tau_1 * ( *( dens_n->tau + ixyz ) - *( dens_p->tau + ixyz ) ) * *( rho_1 + ixyz ) ) ; e_laprho += cc_edf->c_laprho_0 * lap_rho_0[ixyz] * rho_0[ixyz] + cc_edf->c_laprho_1 * lap_rho_1[ixyz] * rho_1[ixyz] ; e_so += ( cc_edf->c_divjj_0 * *(rho_0 + ixyz ) * ( *( dens_n->divjj + ixyz ) + *( dens_p->divjj + ixyz ) ) + cc_edf->c_divjj_1 * *( rho_1 + ixyz ) * ( *( dens_n->divjj + ixyz ) - *( dens_p->divjj + ixyz ) ) ) ; e_pair -= creal( *( delta + ixyz ) * conj( *( dens->nu + ixyz ) ) ) ; pair_gap += cabs(*( delta + ixyz)) * dens->rho[ixyz]; e_j += ( cc_edf->c_j_0 * ( pow( dens_n->jx[ ixyz ] + dens_p->jx[ ixyz ] , 2 ) + pow( dens_n->jy[ ixyz ] + dens_p->jy[ ixyz ] , 2 ) + pow( dens_n->jz[ ixyz ] + dens_p->jz[ ixyz ] , 2 ) ) + cc_edf->c_j_1 * ( pow( dens_n->jx[ ixyz ] - dens_p->jx[ ixyz ] , 2 ) + pow( dens_n->jy[ ixyz ] - dens_p->jy[ ixyz ] , 2 ) + pow( dens_n->jz[ ixyz ] - dens_p->jz[ ixyz ] , 2 ) ) + cc_edf->c_divj_0 * ( ( dens_n->sx[ ixyz ] + dens_p->sx[ ixyz ] ) * ( dens_n->cjx[ ixyz ] + dens_p->cjx[ 
ixyz ] ) + ( dens_n->sy[ ixyz ] + dens_p->sy[ ixyz ] ) * ( dens_n->cjy[ ixyz ] + dens_p->cjy[ ixyz ] ) + ( dens_n->sz[ ixyz ] + dens_p->sz[ ixyz ] ) * ( dens_n->cjz[ ixyz ] + dens_p->cjz[ ixyz ] ) ) + cc_edf->c_divj_1 * ( ( dens_n->sx[ ixyz ] - dens_p->sx[ ixyz ] ) * ( dens_n->cjx[ ixyz ] - dens_p->cjx[ ixyz ] ) + ( dens_n->sy[ ixyz ] - dens_p->sy[ ixyz ] ) * ( dens_n->cjy[ ixyz ] - dens_p->cjy[ ixyz ] ) + ( dens_n->sz[ ixyz ] - dens_p->sz[ ixyz ] ) * ( dens_n->cjz[ ixyz ] - dens_p->cjz[ ixyz ] ) ) ) ; /* if( rho_0[ixyz] > 1-7 ){ double j2=pow(dens_p->jx[ixyz]+dens_n->jx[ixyz],2.); j2+=pow(dens_p->jy[ixyz]+dens_n->jy[ixyz],2.); j2+=pow(dens_p->jz[ixyz]+dens_n->jz[ixyz],2.); e_flow += j2/rho_0[ixyz]; } */ if( dens_p->rho[ixyz] > 1e-7 ){ e_flow_p += (pow(dens_p->jx[ixyz], 2.)+pow(dens_p->jy[ixyz], 2.)+pow(dens_p->jz[ixyz], 2.)) / dens_p->rho[ixyz]; } if( dens_n->rho[ixyz] > 1e-7 ){ e_flow_n += (pow(dens_n->jx[ixyz], 2.)+pow(dens_n->jy[ixyz], 2.)+pow(dens_n->jz[ixyz], 2.)) / dens_n->rho[ixyz]; } if( isospin == 1 ) { e_coul += dens_p->rho[ ixyz ] * vcoul[ ixyz ] ; e_coul += (e2*pow(dens_p->rho[ixyz],xpow)) * (double) cc_edf->iexcoul; } e_ext += chi[ixyz] * rho_0[ixyz]; vx += (dens_n->jx[ixyz] + dens_p->jx[ixyz]) ; vy += (dens_n->jy[ixyz] + dens_p->jy[ixyz]) ; vz += (dens_n->jz[ixyz] + dens_p->jz[ixyz]) ; } beta = deform( rho_0 , nxyz , latt_coords , dxyz ) ; free( rho_0 ); free( rho_1 ) ; free( lap_rho_0 ) ; free( lap_rho_1 ) ; e_pair *= dxyz ; pair_gap *= dxyz; e_kin *= ( hbar2m * dxyz ) ; num_n *= dxyz ; num_part *= dxyz; pair_gap /= num_part; e_ext *= dxyz; if( ip == root_p ) { MPI_Recv( &e_pair_n , 1 , MPI_DOUBLE , root_n , tag1 , comm , status ) ; MPI_Recv( &e_kin_n , 1 , MPI_DOUBLE , root_n , tag2 , comm , status ) ; MPI_Recv( &num_n , 1 , MPI_DOUBLE , root_n , tag3 , comm , status ) ; MPI_Recv( &pair_gap_n , 1 , MPI_DOUBLE , root_n , tag4 , comm , status ) ; e_kin += e_kin_n ; printf( "pairing energy: protons = %f, neutrons = %f \n" , e_pair , e_pair_n ) ; 
printf( "pairing gap: protons = %f, neutrons = %f \n" , pair_gap , pair_gap_n ) ; printf( "# protons = %12.6f \n# neutrons = %12.6f \n" , num_part , num_n ) ; e_pair += e_pair_n ; double mtot = mass * (num_n + num_part) ; vx *= hbarc*dxyz/mtot; vy *= hbarc*dxyz/mtot; vz *= hbarc*dxyz/mtot; e_cm = 0.5 * mtot * ( vx*vx + vy*vy + vz*vz ); } if( ip == root_n ) { MPI_Send( &e_pair , 1 , MPI_DOUBLE , root_p , tag1 , comm ) ; MPI_Send( &e_kin , 1 , MPI_DOUBLE , root_p , tag2 , comm ) ; MPI_Send( &num_part , 1 , MPI_DOUBLE , root_p , tag3 , comm ) ; MPI_Send( &pair_gap , 1 , MPI_DOUBLE , root_p , tag4 , comm ) ; return( e_kin ) ; } //e_rho_0 *= dxyz ; //e_rho_1 *= dxyz ; e_rho *= dxyz; e_rhotau *= dxyz ; e_so *= dxyz ; e_laprho *= dxyz ; e_j *= dxyz ; //e_gamma_0 *= dxyz ; //e_gamma_1 *= dxyz ; e_flow_n *= ( hbar2m * dxyz ); e_flow_p *= ( hbar2m * dxyz ); if(isospin == 1) free( vcoul ) ; e_coul *= ( .5 * dxyz ) ; e_tot = e_kin + e_pair + e_rho + e_rhotau + e_laprho + e_so + e_coul + e_j ; center_dist( dens_p->rho , nxyz , latt_coords , &xcm_p , &ycm_p , &zcm_p ) ; center_dist( dens_n->rho , nxyz , latt_coords , &xcm_n , &ycm_n , &zcm_n ) ; if( compute_gs == 0 ){ compute_gs = 1; egs = e_tot; } // printf("C_lap_rho0=%f C_lap_rho1=%f\n",cc_edf->c_laprho_0,cc_edf->c_laprho_1); printf("e_kin=%12.6f e_rho=%14.6f e_rhotau=%12.6f e_laprho=%12.6f e_so=%12.6f e_coul=%12.6f e_j=%12.6f e_flow = %12.6f e_ext = %12.6f e_cm = %12.6f\n" , e_kin , e_rho , e_rhotau , e_laprho, e_so , e_coul , e_j , e_flow_n+e_flow_p, e_ext, e_cm) ; printf("field energy: %12.6f \n" , e_rho + e_rhotau + e_laprho + e_j ) ; printf("total energy: %12.6f \n\n" , e_tot ) ; fprintf( fd , "%12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %6.3f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f %12.6f \n" , time , e_tot , num_part , num_n , xcm , ycm , zcm , xcm_p , ycm_p , zcm_p , xcm_n , ycm_n , zcm_n , beta , e_flow_n+e_flow_p , egs , q20*dxyz, q30*dxyz, q40*dxyz , 
pair_gap, pair_gap_n, e_ext, e_cm ) ; return( e_tot ) ; } // calculate the distance between two fragments double get_distance(Densities * dens_p , Densities * dens_n, Fragments * frag, const int nxyz , const double dxyz , Lattice_arrays * latt , double *rcm_L, double *rcm_R, double *A_L, double *Z_L, double *A_R, double *Z_R) { int ixyz, i; double xcm_L, ycm_L, zcm_L, xcm_R, ycm_R, zcm_R; // density of fragments double *rhof; double distance; assert(rhof = (double *) malloc(nxyz*sizeof(double))); makeFragment(dens_n->rho, frag->densf_n,frag->thetaL,nxyz); makeFragment(dens_p->rho, frag->densf_p,frag->thetaL,nxyz); *Z_L = center_dist( frag->densf_p, nxyz , latt , &xcm_L, &ycm_L , &zcm_L ) ; for(i=0;i<nxyz;i++) rhof[i] = frag->densf_n[i] + frag->densf_p[i]; *A_L = center_dist( rhof, nxyz , latt , &xcm_L, &ycm_L , &zcm_L ) ; makeFragment(dens_n->rho, frag->densf_n,frag->thetaR,nxyz); makeFragment(dens_p->rho, frag->densf_p,frag->thetaR,nxyz); *Z_R = center_dist( frag->densf_p, nxyz , latt , &xcm_R, &ycm_R , &zcm_R ) ; for(i=0;i<nxyz;i++) rhof[i] = frag->densf_n[i] + frag->densf_p[i]; *A_R = center_dist( rhof, nxyz , latt , &xcm_R, &ycm_R , &zcm_R ) ; rcm_L[0] = xcm_L; rcm_L[1] = ycm_L; rcm_L[2] = zcm_L; rcm_R[0] = xcm_R; rcm_R[1] = ycm_R; rcm_R[2] = zcm_R; distance = sqrt( (xcm_R-xcm_L)*(xcm_R-xcm_L) + (ycm_R-ycm_L)*(ycm_R-ycm_L) + (zcm_R-zcm_L)*(zcm_R-zcm_L)); free(rhof); *A_L *= dxyz; *Z_L *= dxyz; *A_R *= dxyz; *Z_R *= dxyz; return(distance); }
black-scholes_mkl.c
/* * Copyright (C) 2014-2015, 2018 Intel Corporation * * SPDX-License-Identifier: MIT */ #include <omp.h> #include <mkl.h> #include "euro_opt.h" #ifdef __DO_FLOAT__ # define VDIV(n,a,b,r) vsDiv(n,a,b,r) # define VLOG(n,a,r) vsLn(n,a,r) # define VEXP(n,a,r) vsExp(n,a,r) # define VINVSQRT(n,a,r) vsInvSqrt(n,a,r) # define VERF(n,a,r) vsErf(n,a,r) # define QUARTER 0.25f # define HALF 0.5f # define TWO 2.0f #else # define VDIV(n,a,b,r) vdDiv(n,a,b,r) # define VLOG(n,a,r) vdLn(n,a,r) # define VEXP(n,a,r) vdExp(n,a,r) # define VINVSQRT(n,a,r) vdInvSqrt(n,a,r) # define VERF(n,a,r) vdErf(n,a,r) # define QUARTER 0.25 # define HALF 0.5 # define TWO 2.0 #endif #if defined _VML_ACCURACY_EP_ # define VML_ACC VML_EP #elif defined _VML_ACCURACY_LA_ # define VML_ACC VML_LA #elif defined _VML_ACCURACY_HA_ # define VML_ACC VML_HA #else # error: _VML_ACCURACY_HA_/LA/EP should be defined in makefile #endif /* Set the reusable buffer for intermediate results */ #if !defined NBUF # define NBUF 1024 #endif /* // This function computes the Black-Scholes formula. // Input parameters: // nopt - length of arrays // s0 - initial price // x - strike price // t - maturity // // Implementation assumes fixed constant parameters // r - risk-neutral rate // sig - volatility // // Output arrays for call and put prices: // vcall, vput // // Note: the restrict keyword here tells the compiler // that none of the arrays overlap in memory. 
// // Note: the implementation assumes nopt is a multiple of NBUF */ void BlackScholesFormula_MKL( int nopt, tfloat r, tfloat sig, tfloat * restrict s0, tfloat * restrict x, tfloat * restrict t, tfloat * restrict vcall, tfloat * restrict vput ) { int i; tfloat mr = -r; tfloat sig_sig_two = sig * sig * TWO; #pragma omp parallel for \ shared(s0, x, t, vcall, vput, mr, sig_sig_two, nopt) \ default(none) for ( i = 0; i < nopt; i+= NBUF ) { int j; tfloat *a, *b, *c, *y, *z, *e; tfloat *d1, *d2, *w1, *w2; __declspec(align(ALIGN_FACTOR)) tfloat Buffer[NBUF*4]; // This computes vector length for the last iteration of the loop // in case nopt is not exact multiple of NBUF #define MY_MIN(x, y) ((x) < (y)) ? (x) : (y) int nbuf = MY_MIN(NBUF, nopt - i); a = Buffer + NBUF*0; w1 = a; d1 = w1; c = Buffer + NBUF*1; w2 = c; d2 = w2; b = Buffer + NBUF*2; e = b; z = Buffer + NBUF*3; y = z; // Must set VML accuracy in each thread vmlSetMode( VML_ACC ); VDIV(nbuf, s0 + i, x + i, a); VLOG(nbuf, a, a); #pragma simd vectorlength(512) for ( j = 0; j < nbuf; j++ ) { b[j] = t[i + j] * mr; a[j] = a[j] - b[j]; z[j] = t[i + j] * sig_sig_two; c[j] = QUARTER * z[j]; } VINVSQRT(nbuf, z, y); VEXP(nbuf, b, e); #pragma simd vectorlength(512) for ( j = 0; j < nbuf; j++ ) { tfloat aj = a[j]; tfloat cj = c[j]; w1[j] = ( aj + cj ) * y[j]; w2[j] = ( aj - cj ) * y[j]; } VERF(nbuf, w1, d1); VERF(nbuf, w2, d2); #pragma simd vectorlength(512) for ( j = 0; j < nbuf; j++ ) { tfloat d1j = HALF + HALF*d1[j]; tfloat d2j = HALF + HALF*d2[j]; tfloat ej = e[j]; tfloat s0j = s0[i+j]; tfloat xj = x[i+j]; tfloat vcallij =s0j*d1j - xj*ej*d2j; vcall[i+j] = vcallij; vput[i+j] = vcallij - s0j + xj*ej; } #if 0 for ( j = 0; j < nbuf; j++ ) { d1[j] = HALF + HALF*d1[j]; d2[j] = HALF + HALF*d2[j]; vcall[i+j] = s0[i+j]*d1[j] - x[i+j]*e[j]*d2[j]; vput[i+j] = vcall[i+j] - s0[i+j] + x[i+j]*e[j]; } #endif } }
exact_rhs.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB SP code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"

//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//
// Evaluates the analytic solution on the grid and accumulates, into
// `forcing`, second-order flux differences plus fourth-order artificial
// dissipation along each of the three coordinate directions (xi, eta,
// zeta), then flips the sign of the result.
//
// NOTE(review): ue, buf, cuf and q are work arrays declared in header.h
// and reused as per-line scratch inside the parallel loops — presumably
// they are threadprivate (or otherwise per-thread); confirm, since
// truly shared arrays here would race across the `omp for` iterations.
//---------------------------------------------------------------------
void exact_rhs()
{
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

  #pragma omp parallel default(shared) \
          private(i,j,k,m,zeta,eta,xi,dtpp,im1,ip1,jm1,jp1,km1,kp1,dtemp)
  {
  //---------------------------------------------------------------------
  // initialize                                  
  //---------------------------------------------------------------------
  #pragma omp for schedule(static)
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = 0.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // xi-direction flux differences                      
  //---------------------------------------------------------------------
  // nowait: the eta-direction loop below also iterates over k with the
  // same static schedule, so each thread proceeds to its own iterations
  #pragma omp for schedule(static) nowait
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)j * dnym1;

      // first pass: exact solution and derived quantities along the i-line
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];  // 1/density: converts conserved to primitive
        for (m = 1; m < 5; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i]    = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      // second pass: central flux differences into forcing
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          tx2*( ue[ip1][1]-ue[im1][1] )+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * (
            (ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
            (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ue[im1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (
            ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0*ue[i][2] +ue[im1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - tx2*(
            ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - tx2*(
            buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
            buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation (one-sided stencils at the boundaries)
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
            4.0*ue[i+1][m] +     ue[i+2][m]);
      }

      for (i = 3; i <= grid_points[0]-4; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // eta-direction flux differences             
  //---------------------------------------------------------------------
  #pragma omp for schedule(static)
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j]    = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - ty2*(
            ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - ty2*(
            (ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
            (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - ty2*(
            ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - ty2*(
            buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
            buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation                      
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
            4.0*ue[j+1][m] +     ue[j+2][m]);
      }

      for (j = 3; j <= grid_points[1]-4; j++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // zeta-direction flux differences                      
  //---------------------------------------------------------------------
  #pragma omp for schedule(static)
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)j * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k]    = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[k][j][i][0] = forcing[k][j][i][0] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (
            ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (
            ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * (
            (ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
            (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (
            buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
            buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]+buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation                        
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
            4.0*ue[k+1][m] +     ue[k+2][m]);
      }

      for (k = 3; k <= grid_points[2]-4; k++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = forcing[k][j][i][m] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // now change the sign of the forcing function, 
  //---------------------------------------------------------------------
  #pragma omp for schedule(static) nowait
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
        }
      }
    }
  }
  } //end parallel
}
3.race3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N][N]; for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) #pragma omp parallel for for (int k = 1; k < N; k++) A[i][j][k] = A[i][j][k - 1]; } // CHECK: Data Race detected // END
calculate_water_fraction.h
#if !defined(KRATOS_CALCULATE_WATER_FRACTION_UTILITY_INCLUDED ) #define KRATOS_CALCULATE_WATER_FRACTION_UTILITY_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // Project includes #include "includes/define.h" #include "pfem_2_application.h" #include "utilities/math_utils.h" #include "utilities/geometry_utilities.h" #include "includes/ublas_interface.h" #include "includes/variables.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "utilities/enrichment_utilities.h" namespace Kratos { template< unsigned int TDim> class CalculateWaterFraction { public: KRATOS_CLASS_POINTER_DEFINITION(CalculateWaterFraction); CalculateWaterFraction(ModelPart& model_part) : mr_model_part(model_part) //mr_model_part is saved as private variable (declared at the end of the file) { KRATOS_TRY //std::cout << "Hello, I am the constructor of the Utility" << std::endl; KRATOS_CATCH("") } ~CalculateWaterFraction() {} /* double Calculate() //water fraction { KRATOS_TRY //double area; //we create the needed variables double sum_area=0.0; double sum_water_area=0.0; //double one_third=1.0/3.0; ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); #pragma omp parallel for reduction(+:sum_area) reduction(+:sum_water_area) for(unsigned int ii=0; ii<mr_model_part.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; const double & nodal_area = inode->FastGetSolutionStepValue(NODAL_AREA); //resetting the temperature sum_area += nodal_area; if ((inode->FastGetSolutionStepValue(DISTANCE))<0.0) sum_water_area += nodal_area; } const double water_fraction = sum_water_area / sum_area; //std::cout << "Finished, the mean temperature is" << water_fraction << std::endl; //we print the result return water_fraction; KRATOS_CATCH("") } */ double Calculate() { KRATOS_TRY double sum_areas=1.0e-100; //double sum_temperatures=0.0; //double 
nodal_weight=1.0/(1.0+double(TDim)); ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for reduction(+:sum_areas) for(int kkk=0; kkk<number_of_threads; kkk++) { double thread_sum_areas=0.0; for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; double Area; BoundedMatrix<double, (TDim+1), TDim > DN_DX; array_1d<double, (TDim+1) > N; Geometry<Node<3> >& geom = ielem->GetGeometry(); GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Area); //sum_areas+=area; int negative_nodes=0; int positive_nodes=0; for (unsigned int k = 0; k < (TDim+1); k++) { if(geom[k].FastGetSolutionStepValue(DISTANCE)<0.0) negative_nodes++; else positive_nodes++; } if (negative_nodes==(TDim+1)) thread_sum_areas+=Area; else if (negative_nodes>0) { array_1d<double,(TDim+1)> distances; for (unsigned int i = 0; i < (TDim+1); i++) { distances[i] = geom[i].FastGetSolutionStepValue(DISTANCE); } BoundedMatrix<double,3*(TDim-1), 2> Nenriched; array_1d<double,(3*(TDim-1))> volumes; BoundedMatrix<double,(TDim+1), TDim > coords; BoundedMatrix<double, 3*(TDim-1), (TDim+1) > Ngauss; array_1d<double,(3*(TDim-1))> signs; std::vector< Matrix > gauss_gradients(3*(TDim-1)); //fill coordinates //unsigned int single_triangle_node = 1; for (unsigned int i = 0; i < (TDim+1); i++) { const array_1d<double, 3 > & xyz = geom[i].Coordinates(); for (unsigned int j = 0; j < TDim; j++) coords(i,j)=xyz(j); } for (unsigned int i = 0; i < 3*(TDim-1); i++) gauss_gradients[i].resize(2, TDim, false); //2 values of the 2 shape functions, and derivates in (xy) direction). 
unsigned int ndivisions = EnrichmentUtilities::CalculateEnrichedShapeFuncions(coords, DN_DX, distances, volumes, Ngauss, signs, gauss_gradients, Nenriched); for (unsigned int i=0;i!=ndivisions;i++) if (signs(i)<0.0) thread_sum_areas+=volumes(i); } } sum_areas = thread_sum_areas; } //const double mean_temperature = sum_temperatures / sum_areas; std::cout << "Finished, the water volume is " << sum_areas << std::endl; return sum_areas; KRATOS_CATCH("") } double CalculateWaterHeight(double x_position) { KRATOS_TRY double all_threads_water_height=-100000000.0; const double tolerance=0.001; const double upper_limit=x_position+tolerance; const double lower_limit=x_position-tolerance; ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { double local_thread_water_height=-100000000.0; for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; double & distance = (inode->FastGetSolutionStepValue(DISTANCE)); if ( distance <0.0) { if ((inode->X())<upper_limit && (inode->X())>lower_limit && (inode->Y())>local_thread_water_height) local_thread_water_height = (inode->Y()); } //now we search for the node given certain criteria } if ( local_thread_water_height > all_threads_water_height ) { #pragma omp critical { if ( local_thread_water_height > all_threads_water_height ) all_threads_water_height = local_thread_water_height; } } } return all_threads_water_height; KRATOS_CATCH("") } double CalculateMaxCourant() { KRATOS_TRY ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; double all_threads_max_courant = 
0.0; ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { double local_thread_max_courant = 0.0; for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //double & distance = (inode->FastGetSolutionStepValue(DISTANCE)); if ((ielem->GetValue(VELOCITY_OVER_ELEM_SIZE))>local_thread_max_courant) local_thread_max_courant = (ielem->GetValue(VELOCITY_OVER_ELEM_SIZE)); } if ( local_thread_max_courant > all_threads_max_courant ) { #pragma omp critical { if ( local_thread_max_courant > all_threads_max_courant ) all_threads_max_courant = local_thread_max_courant; } } } all_threads_max_courant *= delta_t * 1.414; return all_threads_max_courant; KRATOS_CATCH("") } double CalculateMaxCourantInNegativeElements() { KRATOS_TRY //using a nodal approach (faster!) 
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; double all_threads_max_courant = 0.0; ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { double local_thread_max_courant = 0.0; for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; const double & distance = (inode->FastGetSolutionStepValue(DISTANCE)); const double velocity = sqrt(pow(inode->FastGetSolutionStepValue(VELOCITY_X),2)+pow(inode->FastGetSolutionStepValue(VELOCITY_Y),2)+pow(inode->FastGetSolutionStepValue(VELOCITY_Z),2)); const double nodal_courant = (velocity*delta_t/inode->FastGetSolutionStepValue(MEAN_SIZE)); if(nodal_courant>local_thread_max_courant && distance < 0.0) //only for negative nodes! 
local_thread_max_courant = nodal_courant; } if ( local_thread_max_courant > all_threads_max_courant ) { #pragma omp critical { if ( local_thread_max_courant > all_threads_max_courant ) all_threads_max_courant = local_thread_max_courant; } } } //all_threads_max_courant *= delta_t * 1.414; return all_threads_max_courant; KRATOS_CATCH("") } double CalculateMeanCourant() //water fraction { KRATOS_TRY ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); const double delta_t = CurrentProcessInfo[DELTA_TIME]; //double area=0.0; //we create the needed variables //double number_of_threads = double(OpenMPUtils::GetNumThreads()); double sum_courant=0.0; ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for reduction(+:sum_courant) for(int kkk=0; kkk<number_of_threads; kkk++) { double thread_sum_courant=0.0; for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; //const double & velocity_over_elem_size = (ielem->GetValue(VELOCITY_OVER_ELEM_SIZE)); if ((ielem->GetValue(VELOCITY_OVER_ELEM_SIZE))>0.0) thread_sum_courant += ielem->GetValue(VELOCITY_OVER_ELEM_SIZE); } sum_courant += thread_sum_courant; } sum_courant *= delta_t * 1.414 / double(mr_model_part.Elements().size()); return sum_courant; KRATOS_CATCH("") } //NOW ONLY VISCOUS. but since in the first step we cannot use the pressure we just add the viscoust forces. 
still, lines to use pressure can be uncommented double CalculateForce(int direction) // { KRATOS_TRY ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double viscosity = CurrentProcessInfo[VISCOSITY]; //double delta_t = CurrentProcessInfo[DELTA_TIME]; //array_1d<double,3> & gravity= CurrentProcessInfo[GRAVITY]; const array_1d<double,3> zero3 = ZeroVector(3); double nodal_weight = 1.0/ (double (TDim) ); double force=0.0; ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin(); vector<unsigned int> element_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition); #pragma omp parallel for reduction(+:force) for(int kkk=0; kkk<number_of_threads; kkk++) { double thread_force=0.0; for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; if (ielem->Is(ACTIVE)) //elements can be inactive to add temporary walls. 
fractional velocity is integrated by parts so walls are seen as having zero velocity without doing anything { //double Area; Geometry<Node<3> >& geom = ielem->GetGeometry(); array_1d<unsigned int, 4 > fixed_nodes; //unordered : i position in the array might not correspond to i node of the element array_1d<bool, 4 > is_node_fixed; //i position belongs to i node of the element unsigned int number_of_fixed_nodes=0; //bool boundary_element=false; BoundedMatrix<double, 4, 3 > velocities = ZeroMatrix(4, 3); for (unsigned int i = 0; i < (TDim+1); i++) { const array_1d<double, 3 > & velocity = geom[i].FastGetSolutionStepValue(VELOCITY); for (unsigned int j = 0; j < (TDim); j++) velocities(i,j) = velocity[j]; if (TDim==2) { if (geom[i].IsFixed(FRACT_VEL_X) && geom[i].IsFixed(FRACT_VEL_Y)) { fixed_nodes[number_of_fixed_nodes]=i; is_node_fixed[i]=true; number_of_fixed_nodes++; } else is_node_fixed[i]=false; } else // (TDim==3) { if (geom[i].IsFixed(FRACT_VEL_X) && geom[i].IsFixed(FRACT_VEL_Y) && geom[i].IsFixed(FRACT_VEL_Z) ) { fixed_nodes[number_of_fixed_nodes]=i; number_of_fixed_nodes++; is_node_fixed[i]=true; } else is_node_fixed[i]=false; } } double plane_point_distance=1.0; double fixed_face_area_or_lenght=0.0; array_1d<double, 3 > boundary_stress; if (number_of_fixed_nodes==TDim) //it means we have cutted elements! { //boundary_element=true; array_1d<double, 3 > normal; unsigned int free_node=0; if (TDim==2) { fixed_face_area_or_lenght = fabs(sqrt(pow((geom[fixed_nodes[1]].Y()-geom[fixed_nodes[0]].Y()),2 ) + pow( (geom[fixed_nodes[1]].X()-geom[fixed_nodes[0]].X() ),2 ) ) ); normal[0] = geom[fixed_nodes[1]].Y()-geom[fixed_nodes[0]].Y(); normal[1] = - ( geom[fixed_nodes[1]].X()-geom[fixed_nodes[0]].X() ); normal[2] = 0.0; normal /= sqrt(normal[0]*normal[0]+normal[1]*normal[1]); if (fixed_nodes[0]==0) { if (fixed_nodes[1]==1) free_node=2; else free_node=1; } else free_node=0; //the plane is composed by the unit normal and any of the points of fixed nodes. 
we will use fixed_nodes[0]; plane_point_distance = inner_prod( (geom[free_node].Coordinates()-geom[fixed_nodes[0]].Coordinates()) , normal); //boundary_stress = geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/plane_point_distance; if (plane_point_distance<0.0) { plane_point_distance*=-1.0; normal *= -1.0; } } else //(TDim==3) { //the area is obtained from the crossproduct of the 2 vertices: MathUtils<double>::CrossProduct(normal, geom[fixed_nodes[1]].Coordinates() - geom[fixed_nodes[0]].Coordinates(), geom[fixed_nodes[2]].Coordinates() - geom[fixed_nodes[0]].Coordinates() ); fixed_face_area_or_lenght = 0.5 * sqrt( pow(normal[0],2) + pow(normal[1],2) + pow(normal[2],2) ); normal /= 2.0 * fixed_face_area_or_lenght; //this way it is a unit vector. now we must find the distance from the plane generated by the triangles to the free node: //fixed_face_area_or_lenght = fabs(fixed_face_area_or_lenght); for (unsigned int j=0; j!=(TDim+1); j++) { if (is_node_fixed[j]==false) { free_node=j; break; } } //the plane is composed by the unit normal and any of the points of fixed nodes. we will use fixed_nodes[0]; plane_point_distance = inner_prod( (geom[free_node].Coordinates()-geom[fixed_nodes[0]].Coordinates()) , normal); if (plane_point_distance<0.0) normal *= -1.0; { plane_point_distance*=-1.0; normal *= -1.0; } //boundary_stress = geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/plane_point_distance; } boundary_stress = - geom[free_node].FastGetSolutionStepValue(VELOCITY)*viscosity/(fabs(plane_point_distance)); //KRATOS_WATCH(plane_point_distance) //KRATOS_WATCH(boundary_stress) //KRATOS_WATCH(fixed_face_area_or_lenght) //drag forces: thread_force += boundary_stress[direction]*fixed_face_area_or_lenght; // unit density! careful! 
//face_force+=fixed_face_area_or_lenght*normal[direction]; //now pressure forces: for (unsigned int j=0; j!=(TDim); j++) // the 2 or 3 nodes that define the fixed face: { /* if ( (geom[fixed_nodes[j]].X())<5.0 ) face_force += nodal_weight*(0.5*fixed_face_area_or_lenght*normal[direction]); else face_force -= nodal_weight*(0.5*fixed_face_area_or_lenght*normal[direction]); */ thread_force +=nodal_weight*(geom[fixed_nodes[j]].FastGetSolutionStepValue(PRESSURE))*fixed_face_area_or_lenght*normal[direction]; //array_1d<double,3> & nodal_normal= (geom[fixed_nodes[j]].FastGetSolutionStepValue(NORMAL)); //face_force += (geom[fixed_nodes[j]].FastGetSolutionStepValue(PRESSURE))*nodal_normal[direction]; } } } } force+=thread_force; } return force; KRATOS_CATCH("") } protected: private: ModelPart& mr_model_part; }; } // namespace Kratos. #endif // KRATOS_CALCULATE__WATER_FRACTION_UTILITY_INCLUDED defined
meshtree.h
#ifndef VCGLIB_MESHTREE_H #define VCGLIB_MESHTREE_H #include <vcg/complex/algorithms/align_pair.h> #include <vcg/complex/algorithms/align_global.h> #include <vcg/complex/algorithms/occupancy_grid.h> #ifdef _OPENMP #include <omp.h> #endif namespace vcg { template<class MeshType, class ScalarType> class MeshTree { public: class MeshNode { public: bool glued; MeshType *m; explicit MeshNode(MeshType *_m) : m{_m}, glued{false} {} vcg::Matrix44<ScalarType> &tr() { return m->cm.Tr; } const vcg::Box3<ScalarType> &bbox() const { return m->cm.bbox; } int Id() { return m->id(); } }; class Param { public: int OGSize = 50000; float arcThreshold = 0.3f; float recalcThreshold = 0.1f; }; std::map<int, MeshNode*> nodeMap; std::vector<vcg::AlignPair::Result> resultList; vcg::OccupancyGrid<CMeshO, ScalarType> OG{}; vcg::CallBackPos* cb = vcg::DummyCallBackPos; MeshTree() = default; ~MeshTree() { clear(); } MeshType *MM(unsigned int i) { return nodeMap[i]->m; } void clear() { for (auto& ni : nodeMap) { delete ni.second; } nodeMap.clear(); resultList.clear(); } void deleteResult(MeshTree::MeshNode *mp) { auto li = std::begin(resultList); while (li != resultList.end()) { if (li->MovName == mp->Id() || li->FixName == mp->Id()) { li = resultList.erase(li); } else { ++li; } } } vcg::AlignPair::Result* findResult(int id1, int id2) { for (auto& li : resultList) { if ((li.MovName == id1 && li.FixName == id2) || (li.MovName == id2 && li.FixName == id1) ) { return &li; } } return nullptr; } MeshTree::MeshNode *find(int id) { MeshTree::MeshNode *mp = nodeMap[id]; if (mp == nullptr || mp->Id() != id) { assert("You are trying to find a non existent mesh" == nullptr); } return mp; } MeshTree::MeshNode *find(MeshType *m) { for (auto& ni : nodeMap) { if (ni.second->m == m) return ni.second; } assert("You are trying to find a non existent mesh" == nullptr); return nullptr; } int gluedNum() { int count = 0; for (auto& ni : nodeMap) { if (ni.second->glued) ++count; } return count; } void 
Process(vcg::AlignPair::Param& ap, MeshTree::Param& mtp) { std::array<char, 1024> buf; std::snprintf( buf.data(), 1024, "Starting Processing of %i glued meshes out of %zu meshes\n", gluedNum(), nodeMap.size()); cb(0, buf.data()); /******* Occupancy Grid Computation *************/ buf.fill('\0'); std::snprintf(buf.data(), 1024, "Computing Overlaps %i glued meshes...\n", gluedNum()); cb(0, buf.data()); OG.Init( static_cast<int>(nodeMap.size()), vcg::Box3<ScalarType>::Construct(gluedBBox()), mtp.OGSize); for (auto& ni : nodeMap) { MeshTree::MeshNode* mn = ni.second; if (mn->glued) { OG.AddMesh(mn->m->cm, vcg::Matrix44<ScalarType>::Construct(mn->tr()), mn->Id()); } } OG.Compute(); OG.Dump(stdout); // Note: the s and t of the OG translate into fix and mov, respectively. /*************** The long loop of arc computing **************/ // count existing arcs within current error threshold float percentileThr = 0; if (!resultList.empty()) { vcg::Distribution<float> H; for (auto& li : resultList) { H.Add(li.err); } percentileThr = H.Percentile(1.0f - mtp.recalcThreshold); } std::size_t totalArcNum = 0; int preservedArcNum = 0, recalcArcNum = 0; while (totalArcNum < OG.SVA.size() && OG.SVA[totalArcNum].norm_area > mtp.arcThreshold) { AlignPair::Result* curResult = findResult(OG.SVA[totalArcNum].s, OG.SVA[totalArcNum].t); if (curResult) { if (curResult->err < percentileThr) { ++preservedArcNum; } else { ++recalcArcNum; } } else { resultList.push_back(AlignPair::Result()); resultList.back().FixName = OG.SVA[totalArcNum].s; resultList.back().MovName = OG.SVA[totalArcNum].t; resultList.back().err = std::numeric_limits<double>::max(); } ++totalArcNum; } // if there are no arcs at all complain and return if (totalArcNum == 0) { buf.fill('\0'); std::snprintf( buf.data(), 1024, "\n Failure. There are no overlapping meshes?\n No candidate alignment arcs. 
" "Nothing Done.\n"); cb(0, buf.data()); return; } int num_max_thread = 1; #ifdef _OPENMP if (totalArcNum > 32) num_max_thread = omp_get_max_threads(); #endif buf.fill('\0'); std::snprintf( buf.data(), 1024,"Arc with good overlap %6zu (on %6zu)\n", totalArcNum, OG.SVA.size()); cb(0, buf.data()); buf.fill('\0'); std::snprintf(buf.data(), 1024," %6i preserved %i Recalc \n", preservedArcNum, recalcArcNum); cb(0, buf.data()); bool hasValidAlign = false; #pragma omp parallel for schedule(dynamic, 1) num_threads(num_max_thread) // on windows, omp does not support unsigned types for indices on cycles for (int i = 0; i < static_cast<int>(totalArcNum); ++i) { std::fprintf( stdout, "%4i -> %4i Area:%5i NormArea:%5.3f\n", OG.SVA[i].s, OG.SVA[i].t, OG.SVA[i].area, OG.SVA[i].norm_area); AlignPair::Result* curResult = findResult(OG.SVA[i].s, OG.SVA[i].t); // // missing arc and arc with great error must be recomputed. if (curResult->err >= percentileThr) { ProcessArc(OG.SVA[i].s, OG.SVA[i].t, *curResult, ap); curResult->area = OG.SVA[i].norm_area; if (curResult->isValid()) { hasValidAlign = true; std::pair<double, double> dd = curResult->computeAvgErr(); #pragma omp critical buf.fill('\0'); std::snprintf( buf.data(), 1024, "(%3i/%3zu) %2i -> %2i Aligned AvgErr dd=%f -> dd=%f \n", i + 1, totalArcNum, OG.SVA[i].s, OG.SVA[i].t, dd.first, dd.second); cb(0, buf.data()); } else { #pragma omp critical buf.fill('\0'); std::snprintf( buf.data(), 1024, "(%3i/%3zu) %2i -> %2i Failed Alignment of one arc %s\n", i + 1, totalArcNum, OG.SVA[i].s, OG.SVA[i].t, vcg::AlignPair::errorMsg(curResult->status)); cb(0, buf.data()); } } } // if there are no valid arcs complain and return if (!hasValidAlign) { buf.fill('\0'); std::snprintf( buf.data(), 1024, "\n Failure. No successful arc among candidate Alignment arcs. 
Nothing " "Done.\n"); cb(0, buf.data()); return; } vcg::Distribution<float> H; // stat for printing for (auto& li : resultList) { if (li.isValid()) H.Add(li.err); } buf.fill('\0'); std::snprintf( buf.data(), 1024, "Completed Mesh-Mesh Alignment: Avg Err %5.3f; Median %5.3f; 90%% %5.3f\n", H.Avg(), H.Percentile(0.5f), H.Percentile(0.9f)); cb(0, buf.data()); ProcessGlobal(ap); } void ProcessGlobal(vcg::AlignPair::Param& ap) { /************** Preparing Matrices for global alignment *************/ std::vector<int> GluedIdVec; std::vector<vcg::Matrix44d> GluedTrVec; std::map<int, std::string> names; for (auto& ni : nodeMap) { MeshTree::MeshNode* mn = ni.second; if (mn->glued) { GluedIdVec.push_back(mn->Id()); GluedTrVec.push_back(vcg::Matrix44d::Construct(mn->tr())); names[mn->Id()] = qUtf8Printable(mn->m->label()); } } vcg::AlignGlobal AG; std::vector<vcg::AlignPair::Result*> ResVecPtr; for (auto& li : resultList) { if (li.isValid()) { ResVecPtr.push_back(&li); } } AG.BuildGraph(ResVecPtr, GluedTrVec, GluedIdVec); float StartGlobErr = 0.001f; while (!AG.GlobalAlign( names, StartGlobErr, 100, ap.MatchMode == vcg::AlignPair::Param::MMRigid, stdout, cb)) { StartGlobErr *= 2; AG.BuildGraph(ResVecPtr, GluedTrVec, GluedIdVec); } std::vector<vcg::Matrix44d> GluedTrVecOut(GluedTrVec.size()); AG.GetMatrixVector(GluedTrVecOut, GluedIdVec); // Now get back the results! 
for (std::size_t ii = 0; ii < GluedTrVecOut.size(); ++ii) { MM(GluedIdVec[ii])->cm.Tr.Import(GluedTrVecOut[ii]); } std::string str = "Completed Global Alignment (error bound " + std::to_string(StartGlobErr) + ")\n"; cb(0, str.c_str()); } void ProcessArc(int fixId, int movId, vcg::AlignPair::Result &result, vcg::AlignPair::Param ap) { // l'allineatore globale cambia le varie matrici di posizione di base delle mesh // per questo motivo si aspetta i punti nel sistema di riferimento locale della mesh fix // Si fanno tutti i conti rispetto al sistema di riferimento locale della mesh fix vcg::Matrix44d FixM = vcg::Matrix44d::Construct(find(fixId)->tr()); vcg::Matrix44d MovM = vcg::Matrix44d::Construct(find(movId)->tr()); vcg::Matrix44d MovToFix = Inverse(FixM) * MovM; ProcessArc(fixId, movId, MovToFix, result, ap); } void ProcessArc(int fixId, int movId, vcg::Matrix44d &MovM, vcg::AlignPair::Result &result, vcg::AlignPair::Param ap) { vcg::AlignPair::A2Mesh Fix; vcg::AlignPair aa; // 1) Convert fixed mesh and put it into the grid. MM(fixId)->updateDataMask(MeshType::MeshModel::MM_FACEMARK); aa.convertMesh<CMeshO>(MM(fixId)->cm,Fix); vcg::AlignPair::A2Grid UG; vcg::AlignPair::A2GridVert VG; if (MM(fixId)->cm.fn==0 || ap.UseVertexOnly) { Fix.initVert(vcg::Matrix44d::Identity()); vcg::AlignPair::InitFixVert(&Fix,ap,VG); } else { Fix.init(vcg::Matrix44d::Identity()); vcg::AlignPair::initFix(&Fix, ap, UG); } // 2) Convert the second mesh and sample a <ap.SampleNum> points on it. 
MM(movId)->updateDataMask(MeshType::MeshModel::MM_FACEMARK); std::vector<vcg::AlignPair::A2Vertex> tmpmv; aa.convertVertex(MM(movId)->cm.vert,tmpmv); aa.sampleMovVert(tmpmv, ap.SampleNum, ap.SampleMode); aa.mov=&tmpmv; aa.fix=&Fix; aa.ap = ap; // Perform the ICP algorithm aa.align(MovM,UG,VG,result); result.FixName=fixId; result.MovName=movId; } inline vcg::Box3<ScalarType> bbox() { vcg::Box3<ScalarType> FullBBox; for (auto& ni : nodeMap) { FullBBox.Add(vcg::Matrix44d::Construct(ni.second->tr()), ni.second->bbox()); } return FullBBox; } inline vcg::Box3<ScalarType> gluedBBox() { vcg::Box3<ScalarType> FullBBox; for (auto& ni : nodeMap) { if (ni.second->glued) { FullBBox.Add(vcg::Matrix44<ScalarType>::Construct(ni.second->tr()), ni.second->bbox()); } } return FullBBox; } }; } #endif //VCGLIB_MESHTREE_H
2229.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute schedule(dynamic, 4) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
abstract_pivot_column.h
/*  Copyright 2013 IST Austria
    Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus

    This file is part of PHAT.

    PHAT is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    PHAT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with PHAT.  If not, see <http://www.gnu.org/licenses/>. */

#pragma once

#include <phat/helpers/misc.h>
#include <phat/representations/vector_vector.h>

namespace phat {

    // Boundary-matrix representation that caches the column currently being
    // reduced ("pivot column") in a specialized per-thread data structure,
    // so that repeated column additions during reduction avoid touching the
    // underlying vector<vector<index>> storage until the column is released.
    //
    // Note: We could even make the rep generic in the underlying Const representation
    // But I cannot imagine that anything else than vector<vector<index>> would
    // make sense
    template< typename PivotColumn >
    class abstract_pivot_column : public vector_vector {

    protected:
        typedef vector_vector Base;
        typedef PivotColumn pivot_col;

        // For parallization purposes, it could be more than one full column
        // (thread_local_storage presumably keeps one instance per thread —
        //  TODO confirm against phat/helpers/misc.h).
        mutable thread_local_storage< pivot_col > pivot_cols;
        // Index of the matrix column currently held in this thread's pivot
        // column; -1 means "none cached".
        mutable thread_local_storage< index > idx_of_pivot_cols;

        // Accessor for the calling thread's cached pivot column.
        pivot_col& get_pivot_col() const {
            return pivot_cols();
        }

        // True iff column idx is the one cached by the calling thread.
        bool is_pivot_col( index idx ) const {
            return idx_of_pivot_cols() == idx;
        }

        // Writes the cached column back into the base storage (clearing the
        // cache) and marks this thread's cache as empty (-1).
        void release_pivot_col() {
            index idx = idx_of_pivot_cols();
            if( idx != -1 ) {
                this->matrix[ idx ].clear();
                pivot_cols().get_col_and_clear( this->matrix[ idx ] );
            }
            idx_of_pivot_cols() = -1;
        }

        // Releases any previously cached column, then loads column idx from
        // the base storage into this thread's pivot column.
        void make_pivot_col( index idx ) {
            release_pivot_col();
            idx_of_pivot_cols() = idx;
            get_pivot_col().add_col( matrix[ idx ] );
        }

    public:

        // Initializes every thread's pivot column for nr_of_cols columns and
        // resets the cached indices, then resizes the base storage.
        // NOTE(review): the loop bound omp_get_num_threads() is evaluated
        // inside the parallel region (team size); relies on each tid value
        // being executed by some thread — the upstream PHAT idiom.
        void _set_num_cols( index nr_of_cols ) {
            #pragma omp parallel for
            for( int tid = 0; tid < omp_get_num_threads(); tid++ ) {
                pivot_cols[ tid ].init( nr_of_cols );
                idx_of_pivot_cols[ tid ] = -1;
            }
            Base::_set_num_cols( nr_of_cols );
        }

        // target += source (mod 2). Caches target as the pivot column first
        // if it is not cached already.
        void _add_to( index source, index target ) {
            if( !is_pivot_col( target ) )
                make_pivot_col( target );
            get_pivot_col().add_col( matrix[source] );
        }

        // Flushes every thread's cached pivot column back to base storage.
        // release_pivot_col() only touches the calling thread's TLS, so each
        // thread of the team must execute it at least once (see note above).
        void _sync() {
            #pragma omp parallel for
            for( int tid = 0; tid < omp_get_num_threads(); tid++ )
                release_pivot_col();
        }

        // The remaining operations dispatch to the cached pivot column when
        // idx is the cached one, and to the base representation otherwise.

        void _get_col( index idx, column& col ) const {
            is_pivot_col( idx ) ? get_pivot_col().get_col( col ) : Base::_get_col( idx, col );
        }

        bool _is_empty( index idx ) const {
            return is_pivot_col( idx ) ? get_pivot_col().is_empty() : Base::_is_empty( idx );
        }

        index _get_max_index( index idx ) const {
            return is_pivot_col( idx ) ? get_pivot_col().get_max_index() : Base::_get_max_index( idx );
        }

        void _clear( index idx ) {
            is_pivot_col( idx ) ? get_pivot_col().clear() : Base::_clear( idx );
        }

        void _set_col( index idx, const column& col  ) {
            is_pivot_col( idx ) ? get_pivot_col().set_col( col ) : Base::_set_col( idx, col );
        }

        void _remove_max( index idx ) {
            is_pivot_col( idx ) ? get_pivot_col().remove_max() : Base::_remove_max( idx );
        }

        void finalize( index idx ) { Base::_finalize( idx ); }
    };
}
example4.c
// calculation example of far-field intensity distributions. // radar chart is output for a distant scattering field. #include "emf_mie_ms.h" int main(int argc,char *argv[]) { MSPD msp; FILE *fp1,*fp2; double complex e[3],h[3]; double th,ph,phd,dthd,dthr,dphr,dphd,ra,r[3],*ie,*ih,mf,iemax,ihmax; int i,j,sn; if(argc!=2 && argc!=4){ printf("Usage : %s datafile_name [sampling_number multplier_factor](optional)\n",argv[0]); printf("default sampling number 360, multiplier factor 2000 (radius = 2000*lambda0)\n"); exit(0); } else if(argc==4){ sn=atoi(argv[2]); mf=atof(argv[3]); } else{ sn=360; mf=2000.0; } read_dat_ms(argv[1],&msp); // read data file print_data_ms(&msp); // print data ra=mf*msp.bm.lambda_0; // radius for calculation point dthd=360.0/(double)sn; // delta theta [degree] dthr=2.0*M_PI/(double)sn; // delta theta [radian] dphd=180.0/(double)sn; dphr=1.0*M_PI/(double)sn; ie=(double *)m_alloc2(sn+1,sizeof(double),"example2.c,ie"); ih=(double *)m_alloc2(sn+1,sizeof(double),"example2.c,ih"); // x=0 plane, th=0 : +z-axis, th=270 : +y-axis if((fp1=fopen("fsIe_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## x=0 plane, theta=0 : +z-axis, theta=270 : +y-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_yz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## x=0 plane, theta=0 : +z-axis, theta=270 : +y-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=0.0; r[1]=-ra*sin(th); r[2]= ra*cos(th); scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); 
ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // y=0 plane, th=0 : +z-axis, th=90 : +x-axis if((fp1=fopen("fsIe_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## y=0 plane, theta=0 : +z-axis, theta=90 : +x-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_xz.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## x=0 plane, theta=0 : +z-axis, theta=90 : +x-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=ra*sin(th); r[1]=0.0; r[2]=ra*cos(th); scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // z=0 plane, th=0 : +x-axis, th=90 : +y-axis if((fp1=fopen("fsIe_xy.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## z=0 plane, theta=0 : +x-axis, theta=90 : +y-axis "); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta electric_field_intensity normalized_intensity"); if((fp2=fopen("fsIh_xy.txt","wt"))==NULL){ 
printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## z=0 plane, theta=0 : +x-axis, theta=90 : +y-axis "); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta magnetic_field_intensity normalized_intensity"); iemax=0.0; ihmax=0.0; for(i=0;i<sn;i++){ th=0.5*dthr+(double)i*dthr; r[0]=ra*cos(th); r[1]=ra*sin(th); r[2]=0.0; scattered_EH_ms(e,h,r,&msp); // scattered field ie[i]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); ih[i]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); if(ie[i]>iemax) iemax=ie[i]; if(ih[i]>ihmax) ihmax=ih[i]; } for(i=0;i<sn;i++){ th=0.5*dthd+(double)i*dthd; fprintf(fp1,"%g %15.14e %15.14e\n",th,ie[i],ie[i]/iemax); fprintf(fp2,"%g %15.14e %15.14e\n",th,ih[i],ih[i]/ihmax); } fclose(fp1); fclose(fp2); // 3d plot if((fp1=fopen("fsIe_3d.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp1,"%s\n","## 3d plot, x=r*sin(theta)*cos(phi), y=r*sin(theta)*sin(phi), z=r*cos(theta), r=multiplier_factor*lambda0"); fprintf(fp1,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp1,"%s\n","# theta phi electric_field_intensity"); if((fp2=fopen("fsIh_3d.txt","wt"))==NULL){ printf("Can not open the file.\n"); exit(1); } fprintf(fp2,"%s\n","## 3d plot, x=r*sin(theta)*cos(phi), y=r*sin(theta)*sin(phi), z=r*cos(theta), r=multiplier_factor*lambda0"); fprintf(fp2,"%s %d, %s %g\n","## sampling number",sn,"multiplier factor",mf); fprintf(fp2,"%s\n","# theta phi magnetic_field_intensity"); for(i=0;i<sn;i++){ ph =0.5*dphr+(double)i*dphr; phd=0.5*dphd+(double)i*dphd; #pragma omp parallel for schedule(dynamic) private(th,r,e,h) for(j=0;j<=sn;j++){ th=(double)j*dthr; r[0]=ra*sin(ph)*cos(th); r[1]=ra*sin(ph)*sin(th); r[2]=ra*cos(ph); scattered_EH_ms(e,h,r,&msp); // scattered field ie[j]=creal(e[0]*conj(e[0]))+creal(e[1]*conj(e[1]))+creal(e[2]*conj(e[2])); 
ih[j]=creal(h[0]*conj(h[0]))+creal(h[1]*conj(h[1]))+creal(h[2]*conj(h[2])); } for(j=0;j<=sn;j++){ th=(double)j*dthd; fprintf(fp1,"%g %g %15.14e\n",phd,th,ie[j]); fprintf(fp2,"%g %g %15.14e\n",phd,th,ih[j]); } fprintf(fp1,"\n"); fprintf(fp2,"\n"); } fclose(fp1); fclose(fp2); free(ie); free(ih); free_ms(&msp); return 0; }
GB_binop__first_bool.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_bool)
// A*D function (colscale):         GB (_AxD__first_bool)
// D*A function (rowscale):         GB (_DxB__first_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_bool)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   bool
// A type:   bool
// A pattern? 0
// B type:   bool
// B pattern? 1

// BinaryOp: cij = aij
// (FIRST operator: the result is the A value; B values are never read,
// so B is treated as pattern-only — see GB_B_IS_PATTERN below.)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    bool aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB] (empty: B values are never read by the FIRST operator)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = first (x, y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (FIRST is none of these, so this kernel is disabled for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator; the call is a no-op
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for this operator; the call is a no-op
    #if 0
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of implicit zeros
    bool alpha_scalar ;
    bool beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((bool *) alpha_scalar_in)) ;
        beta_scalar  = (*((bool *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// disabled for FIRST: first(x,bij) == x for all entries, so the generic
// path is never specialized here (kernel not generated)
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        bool aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;  ;                    \
    Cx [pC] = x ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    bool
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    bool aij = GBX (Ax, pA, false) ;    \
    Cx [pC] = aij ;                     \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  This method is based on a similar algorithm written by Paul Raveling.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift  2
#else
#define CacheShift  3
#endif
#define ErrorQueueLength  16
#define ErrorRelativeWeight  PerceptibleReciprocal(16)
#define MaxNodes  266817
#define MaxTreeDepth  8
#define NodesInAList  1920

/*
  Typedef declarations.
*/
/* a pixel as doubles, optionally alpha-associated (premultiplied) */
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;

/* one node of the color-description tree (16 children: 3 color bits plus
   one optional alpha bit per level) */
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;

  DoublePixelPacket
    total_color;

  double
    quantize_error;

  size_t
    color_number,
    id,
    level;
} NodeInfo;

/* a pool of NodeInfo structures, chained for bulk allocation/free */
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/* working state for classification, reduction, and assignment */
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;

  double
    distance,
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;

  DoublePixelPacket
    error[ErrorQueueLength];

  double
    diffusion,
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;

  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *,ExceptionInfo *),
  SetGrayscaleImage(Image *,ExceptionInfo *),
  SetImageColormap(Image *,CubeInfo *,ExceptionInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *quantize_info;

  /*
    Allocate and initialize with defaults, then overlay any image-info
    settings (dither method and error-measurement flag).
  */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  if (image_info->dither == MagickFalse)
    quantize_info->dither_method=NoDitherMethod;
  else
    quantize_info->dither_method=RiemersmaDitherMethod;
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,dither_option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/* Load a pixel into a DoublePixelPacket; when alpha is associated and the
   pixel is not fully opaque, premultiply RGB by the (scaled) alpha value. */
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /* no premultiplication needed: copy channels through */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/* Same as AssociateAlphaPixel() but for a PixelInfo source. */
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/* Map a color to a child id (0..15) at the given tree level: one bit each
   from red, green, blue, and (when associated) alpha at bit `index`. */
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

/* Generate the reduced-color output image from the pruned color tree:
   build the colormap, then (with or without dithering) reassign each
   pixel to its closest colormap entry. */
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  if (SetImageColormap(image,cube_info,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        Quantum
          *magick_restrict q;

        ssize_t
          count,
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* each thread works on a private copy of the cube state */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          const NodeInfo
            *node_info;

          ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* count the run of identical pixels so the lookup is done once */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (IsGrayColorspace(cube_info->quantize_info->colorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* restore the caller's colorspace if we transformed it above */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
% % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait != UndefinedPixelTrait ? 
MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; double bisect; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if (cube_info->quantize_info->colorspace != image->colorspace) { if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace, exception); } midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. 
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. 
*/ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if (cube_info->quantize_info->colorspace != image->colorspace) if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double alpha, beta, distance, pixel; DoublePixelPacket *magick_restrict q; PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(MagickRealType) (QuantumScale*p->alpha); beta=(MagickRealType) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette images can be compressed; anything else is reported as a
    no-op via MagickFalse.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize at full tree depth with the image's current color count:
    QuantizeImage() rebuilds the colormap, dropping duplicate and unused
    entries as a side effect.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e   I m a g e   C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children; with alpha association the tree is 16-ary,
    otherwise 8-ary (one child per RGB(A) octant).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha here is 1/number_unique, used to turn color sums into means. */
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /* Opaque path: mean RGB, alpha forced fully opaque. */
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Partially transparent entry: divide out the mean alpha
                (gamma=1/alpha) so the stored RGB is un-premultiplied.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated transparent entry for later use as
                the image's transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      /* Record this node's colormap slot and grow the colormap. */
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   C u b e   I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage.  Nodes are pooled in a linked list of
    Nodes arenas, so freeing the arenas releases every NodeInfo at once.
    NOTE(review): the do/while assumes node_queue is non-NULL on entry —
    presumably guaranteed because GetCubeInfo() always allocates the root
    node; confirm against GetNodeInfo().
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /* The dither cache lives in virtual memory; release it if allocated. */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  /* Final assignment is a dead store; kept for symmetry with other dtors. */
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y   Q u a n t i z e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Poison the signature so a stale pointer fails the assert above. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  /* RelinquishMagickMemory() returns NULL; callers get a cleared pointer. */
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    i;

  /*
    Release the per-thread error-diffusion buffers and the pointer table.
    NOTE(review): the thread count is re-queried here; assumes the
    ThreadResource limit has not changed since AcquirePixelThreadSet() —
    confirm this invariant holds for callers.
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    number_threads;

  ssize_t
    i;

  /*
    One buffer per worker thread; each holds two rows (current + previous)
    of `count` error packets, hence the factor of 2 below.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));  /* unwind partial allocation */
  }
  return(pixels);
}

/*
  Map a pixel color to its slot in the dither color cache: each channel is
  quantized to (8-CacheShift) bits and the channels are packed into one
  index (alpha included only when associated).
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;
ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; size_t index; ssize_t x, v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16; pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16; pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=cube_info->diffusion*previous[u+v].red/16; pixel.green+=cube_info->diffusion*previous[u+v].green/16; pixel.blue+=cube_info->diffusion*previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16; } pixel.red+=5.0*cube_info->diffusion*previous[u].red/16; pixel.green+=5.0*cube_info->diffusion*previous[u].green/16; pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16; 
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16; pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CubeInfo *p; DoublePixelPacket color, pixel; MagickBooleanType proceed; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { Quantum *magick_restrict q; ssize_t i; /* Distribute error. 
*/ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].red; pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].green; pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]* p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { NodeInfo *node_info; size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType Riemersma(Image *image,CacheView *image_view, CubeInfo *cube_info,const size_t level,const unsigned int direction, ExceptionInfo *exception) { MagickBooleanType status; status=MagickTrue; if (level == 1) switch (direction) { case WestGravity: { status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { 
status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != 
MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } return(status); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) 
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  size_t
    extent,
    level;

  /* Optional override of the error-diffusion amount (0..1). */
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax(image->columns,image->rows);
  /*
    level = number of curve subdivisions; bump it if 2^level does not yet
    cover the larger image dimension.
  */
  level=(size_t) log2((double) extent);
  if (((size_t) 1UL << level) < extent)
    level++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (level > 0)
    status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception);
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* Clamp the tree depth to [2,MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  /* The root is its own parent, which terminates upward traversals. */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache: -1 in every byte marks each slot as unassigned.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      /* Push the new block onto the cube's node-queue free list. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Error is only meaningful for colormapped (PseudoClass) images, where
    each pixel can be compared against its colormap entry.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channel samples per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /*
            Weight each channel by the pixel's and the colormap entry's
            normalized alpha.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /* Accumulate per-channel |pixel-colormap| distances for R, G, B. */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Normalize the accumulated sums into the image's error record. */
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /* Zero everything, then apply the library defaults. */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   K m e a n s I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KmeansImage() applies k-means color reduction to an image.  This is a
%  colorspace clustering or segmentation technique.
%
%  The format of the KmeansImage method is:
%
%      MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
%        const size_t max_iterations,const double tolerance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_colors: number of colors to use as seeds.
%
%    o max_iterations: maximum number of iterations while converging.
%
%    o tolerance: the maximum tolerance.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-cluster accumulator: channel sums, member count, and total distortion
  for one k-means centroid (one array of these per worker thread).
*/
typedef struct _KmeansInfo
{
  double
    red,
    green,
    blue,
    alpha,
    black,
    count,
    distortion;
} KmeansInfo;

/*
  Release the per-thread cluster accumulators; returns the (NULL) result of
  relinquishing the outer array so callers can reassign in one statement.
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  ssize_t
    i;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (kmeans_info[i] != (KmeansInfo *) NULL)
      kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]);
  kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info);
  return(kmeans_info);
}

/*
  Allocate one KmeansInfo[number_colors] accumulator per worker thread.
  The inner arrays are not cleared here; the caller zeroes them before each
  iteration.  Returns NULL on failure (any partial set is released).
*/
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
  KmeansInfo
    **kmeans_info;

  ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*kmeans_info));
  if (kmeans_info == (KmeansInfo **) NULL)
    return((KmeansInfo **) NULL);
  /* Zero the pointer array so a partial failure can be cleaned up safely. */
  (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
      sizeof(**kmeans_info));
    if (kmeans_info[i] == (KmeansInfo *) NULL)
      return(DestroyKmeansThreadSet(kmeans_info));
  }
  return(kmeans_info);
}

/*
  Squared distance between pixel p and candidate color q, with channels
  scaled by QuantumScale and weighted by the alpha-derived gamma factor.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha); metric+=pixel*pixel; if (image->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*GetPixelAlpha(image,p); if (q->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*q->alpha; } if (image->colorspace == CMYKColorspace) { pixel=QuantumScale*(GetPixelBlack(image,p)-q->black); metric+=gamma*pixel*pixel; gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p)); gamma*=QuantumScale*(QuantumRange-q->black); } metric*=3.0; pixel=QuantumScale*(GetPixelRed(image,p)-q->red); if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs((double) pixel) > 0.5) pixel-=0.5; pixel*=2.0; } metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelGreen(image,p)-q->green); metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue); metric+=gamma*pixel*pixel; return(metric); } MagickExport MagickBooleanType KmeansImage(Image *image, const size_t number_colors,const size_t max_iterations,const double tolerance, ExceptionInfo *exception) { #define KmeansImageTag "Kmeans/Image" #define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info)) CacheView *image_view; const char *colors; double previous_tolerance; KmeansInfo **kmeans_pixels; MagickBooleanType verbose, status; ssize_t n; size_t number_threads; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colors=GetImageArtifact(image,"kmeans:seed-colors"); if (colors == (const char *) NULL) { CubeInfo *cube_info; QuantizeInfo *quantize_info; size_t depth; /* Seed clusters from color quantization. 
*/ quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->colorspace=image->colorspace; quantize_info->number_colors=number_colors; quantize_info->dither_method=NoDitherMethod; n=number_colors; for (depth=1; n != 0; depth++) n>>=2; cube_info=GetCubeInfo(quantize_info,depth,number_colors); if (cube_info == (CubeInfo *) NULL) { quantize_info=DestroyQuantizeInfo(quantize_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=SetImageColormap(image,cube_info,exception); } DestroyCubeInfo(cube_info); quantize_info=DestroyQuantizeInfo(quantize_info); if (status == MagickFalse) return(status); } else { char color[MagickPathExtent]; const char *p; /* Seed clusters from color list (e.g. red;green;blue). */ status=AcquireImageColormap(image,number_colors,exception); if (status == MagickFalse) return(status); for (n=0, p=colors; n < (ssize_t) image->colors; n++) { const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,image->colormap+n, exception); if (*q == '\0') { n++; break; } p=q+1; } if (n < (ssize_t) image->colors) { RandomInfo *random_info; /* Seed clusters from random values. 
*/ random_info=AcquireRandomInfo(); for ( ; n < (ssize_t) image->colors; n++) { (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n, exception); image->colormap[n].red=RandomColorComponent(random_info); image->colormap[n].green=RandomColorComponent(random_info); image->colormap[n].blue=RandomColorComponent(random_info); if (image->alpha_trait != UndefinedPixelTrait) image->colormap[n].alpha=RandomColorComponent(random_info); if (image->colorspace == CMYKColorspace) image->colormap[n].black=RandomColorComponent(random_info); } random_info=DestroyRandomInfo(random_info); } } /* Iterative refinement. */ kmeans_pixels=AcquireKmeansThreadSet(number_colors); if (kmeans_pixels == (KmeansInfo **) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); previous_tolerance=0.0; verbose=IsStringTrue(GetImageArtifact(image,"debug")); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); image_view=AcquireAuthenticCacheView(image,exception); for (n=0; n < (ssize_t) max_iterations; n++) { double distortion; ssize_t j, y; for (j=0; j < (ssize_t) number_threads; j++) (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j])); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double min_distance; ssize_t i, k; /* Assign each pixel whose mean has the least squared color distance. 
*/ k=0; min_distance=KmeansMetric(image,q,image->colormap+0); for (i=1; i < (ssize_t) image->colors; i++) { double distance; if (min_distance <= MagickEpsilon) break; distance=KmeansMetric(image,q,image->colormap+i); if (distance < min_distance) { min_distance=distance; k=i; } } kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q); kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q); kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q); if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q); if (image->colorspace == CMYKColorspace) kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q); kmeans_pixels[id][k].count++; kmeans_pixels[id][k].distortion+=min_distance; SetPixelIndex(image,(Quantum) k,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } if (status == MagickFalse) break; /* Reduce sums to [0] entry. */ for (j=1; j < (ssize_t) number_threads; j++) { ssize_t k; for (k=0; k < (ssize_t) image->colors; k++) { kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red; kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green; kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue; if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha; if (image->colorspace == CMYKColorspace) kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black; kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count; kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion; } } /* Calculate the new means (centroids) of the pixels in the new clusters. 
*/ distortion=0.0; for (j=0; j < (ssize_t) image->colors; j++) { double gamma; gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count); image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red; image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green; image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue; if (image->alpha_trait != UndefinedPixelTrait) image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha; if (image->colorspace == CMYKColorspace) image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black; distortion+=kmeans_pixels[0][j].distortion; } if (verbose != MagickFalse) (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n, GetMagickPrecision(),distortion,GetMagickPrecision(), fabs(distortion-previous_tolerance)); if (fabs(distortion-previous_tolerance) <= tolerance) break; previous_tolerance=distortion; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n, max_iterations); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels); if (image->progress_monitor != (MagickProgressMonitor) NULL) (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType) max_iterations-1,max_iterations); if (status == MagickFalse) return(status); return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. 
% % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. 
*/ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType 
proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneSubtree method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); if (cube_info->nodes > cube_info->maximum_colors) { /* Merge color statistics into parent. 
      */
      parent=node_info->parent;
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.alpha+=node_info->total_color.alpha;
      /* Detach the pruned child and account for the removed node. */
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   L e v e l                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  /* Only nodes at the maximum cube depth are folded into their parents. */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   T o   C u b e   D e p t h                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  /* Nodes deeper than the configured cube depth are merged upward. */
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  ImageType
    type;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Clamp the requested palette size to (0,MaxColormapSize]. */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  type=IdentifyImageGray(image,exception);
  if (IsGrayImageType(type) != MagickFalse)
    (void) SetGrayscaleImage(image,exception);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Shallower trees when dithering or alpha is involved (if room). */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5))
        depth--;
      if (IsGrayImageType(type) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  size_t
    depth,
    maximum_colors,
    number_images;

  ssize_t
    i;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
*/ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { size_t n, number_children; ssize_t i; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 
    8UL : 16UL;
  /*
    Depth-first: this node's error was stored at `offset'; pack each child's
    subtree immediately after it and return the number of slots consumed.
  */
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.  The cube has 8 children per node for RGB, 16 when
    an alpha channel is associated.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  /* Prune this node too if its own error is within the current threshold. */
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.  Surviving nodes with unique colors
        count toward the colormap; the smallest surviving error becomes the
        next pass's threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.
It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Populate the cube from the reference image's colors. */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Restrict the colormap to the reference colors and remap the image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
% % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. 
*/ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); if (intensity < (double) INT_MIN) intensity=(double) INT_MIN; if (intensity > (double) INT_MAX) intensity=(double) INT_MAX; return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; size_t extent; ssize_t *colormap_index, i, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) 
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) 
AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColormap() traverses the color cube tree and sets the colormap of % the image. 
A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the SetImageColormap method is:
%
%      MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  /*
    Allocate a colormap large enough for either the requested maximum or the
    colors actually present in the cube, whichever is greater.
  */
  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() walks the tree and rebuilds image->colors. */
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors != number_colors)
    {
      /*
        Fewer colors were defined than allocated; shrink the colormap to the
        actual count (+1 guards against a zero-length allocation).
      */
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
/* ==================== paint.c ==================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. 
% % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const DrawInfo *draw_info,const PixelInfo target, % const ssize_t x_offset,const ssize_t y_offset, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType FloodfillPaintImage(Image *image, const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset, const ssize_t y_offset,const MagickBooleanType invert, ExceptionInfo *exception) { #define MaxStacksize 524288UL #define PushSegmentStack(up,left,right,delta) \ { \ if (s >= (segment_stack+MaxStacksize)) \ ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \ else \ { \ if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \ { \ s->x1=(double) (left); \ s->y1=(double) (up); \ s->x2=(double) (right); \ s->y2=(double) (delta); \ s++; \ } \ } \ } CacheView *floodplane_view, *image_view; Image *floodplane_image; MagickBooleanType skip, status; MemoryInfo *segment_info; PixelInfo fill_color, pixel; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x1, x2, y; /* Check boundary conditions. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if ((image->alpha_trait == UndefinedPixelTrait) && (draw_info->fill.alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(image,OpaqueAlpha,exception); /* Set floodfill state. */ floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); floodplane_image->alpha_trait=UndefinedPixelTrait; floodplane_image->colorspace=GRAYColorspace; (void) QueryColorCompliance("#000",AllCompliance, &floodplane_image->background_color,exception); (void) SetImageBackgroundColor(floodplane_image,exception); segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack)); if (segment_info == (MemoryInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info); /* Push initial segment on stack. 
*/ status=MagickTrue; start=0; s=segment_stack; PushSegmentStack(y_offset,x_offset,x_offset,1); PushSegmentStack(y_offset+1,x_offset,x_offset,-1); GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception); while (s > segment_stack) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. */ p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception); q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; p+=x1*GetPixelChannels(image); q+=x1*GetPixelChannels(floodplane_image); for (x=x1; x >= 0; x--) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p-=GetPixelChannels(image); q-=GetPixelChannels(floodplane_image); } if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse) break; skip=x >= x1 ? 
MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns- x,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert) break; SetPixelGray(floodplane_image,QuantumRange,q); p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } status=SyncCacheViewAuthenticPixels(floodplane_view,exception); if (status == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1, exception); q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for ( ; x <= x2; x++) { if (GetPixelGray(floodplane_image,q) != 0) break; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert) break; p+=GetPixelChannels(image); q+=GetPixelChannels(floodplane_image); } } start=x; } while (x <= x2); } status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; /* Tile fill color onto floodplane. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelGray(floodplane_image,p) != 0) { GetFillColor(draw_info,x,y,&fill_color,exception); SetPixelViaPixelInfo(image,&fill_color,q); } p+=GetPixelChannels(floodplane_image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } floodplane_view=DestroyCacheView(floodplane_view); image_view=DestroyCacheView(image_view); segment_info=RelinquishVirtualMemory(segment_info); floodplane_image=DestroyImage(floodplane_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GradientImage() applies a continuously smooth color transitions along a % vector from one color to another. % % Note, the interface of this method will change in the future to support % more than one transistion. % % The format of the GradientImage method is: % % MagickBooleanType GradientImage(Image *image,const GradientType type, % const SpreadMethod method,const PixelInfo *start_color, % const PixelInfo *stop_color,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the gradient type: linear or radial. % % o spread: the gradient spread meathod: pad, reflect, or repeat. % % o start_color: the start color. % % o stop_color: the stop color. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GradientImage(Image *image, const GradientType type,const SpreadMethod method,const StopInfo *stops, const size_t number_stops,ExceptionInfo *exception) { const char *artifact; DrawInfo *draw_info; GradientInfo *gradient; MagickBooleanType status; /* Set gradient start-stop end points. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(stops != (const StopInfo *) NULL); assert(number_stops > 0); draw_info=AcquireDrawInfo(); gradient=(&draw_info->gradient); gradient->type=type; gradient->bounding_box.width=image->columns; gradient->bounding_box.height=image->rows; artifact=GetImageArtifact(image,"gradient:bounding-box"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box); gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; artifact=GetImageArtifact(image,"gradient:direction"); if (artifact != (const char *) NULL) { GravityType direction; direction=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,artifact); switch (direction) { case NorthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case NorthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=(double) image->rows-1; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case WestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; 
gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=0.0; break; } case EastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=0.0; break; } case SouthWestGravity: { gradient->gradient_vector.x1=(double) image->columns-1; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->rows-1; break; } case SouthGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=0.0; gradient->gradient_vector.y2=(double) image->columns-1; break; } case SouthEastGravity: { gradient->gradient_vector.x1=0.0; gradient->gradient_vector.y1=0.0; gradient->gradient_vector.x2=(double) image->columns-1; gradient->gradient_vector.y2=(double) image->rows-1; break; } default: break; } } artifact=GetImageArtifact(image,"gradient:angle"); if (artifact != (const char *) NULL) gradient->angle=StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"gradient:vector"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf", &gradient->gradient_vector.x1,&gradient->gradient_vector.y1, &gradient->gradient_vector.x2,&gradient->gradient_vector.y2); if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) && (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) && (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) && (GetImageArtifact(image,"gradient:vector") == (const char *) NULL)) if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0)) gradient->gradient_vector.x2=0.0; gradient->center.x=(double) gradient->gradient_vector.x2/2.0; gradient->center.y=(double) gradient->gradient_vector.y2/2.0; artifact=GetImageArtifact(image,"gradient:center"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x, &gradient->center.y); 
artifact=GetImageArtifact(image,"gradient:angle"); if ((type == LinearGradient) && (artifact != (const char *) NULL)) { double sine, cosine, distance; /* Reference https://drafts.csswg.org/css-images-3/#linear-gradients. */ sine=sin((double) DegreesToRadians(gradient->angle-90.0)); cosine=cos((double) DegreesToRadians(gradient->angle-90.0)); distance=fabs((double) (image->columns-1.0)*cosine)+ fabs((double) (image->rows-1.0)*sine); gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine); gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine); gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine); gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine); } gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/ 2.0; gradient->radii.y=gradient->radii.x; artifact=GetImageArtifact(image,"gradient:extent"); if (artifact != (const char *) NULL) { if (LocaleCompare(artifact,"Circle") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Diagonal") == 0) { gradient->radii.x=(double) (sqrt((image->columns-1.0)* (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Ellipse") == 0) { gradient->radii.x=(double) (image->columns-1.0)/2.0; gradient->radii.y=(double) (image->rows-1.0)/2.0; } if (LocaleCompare(artifact,"Maximum") == 0) { gradient->radii.x=(double) MagickMax((image->columns-1.0), (image->rows-1.0))/2.0; gradient->radii.y=gradient->radii.x; } if (LocaleCompare(artifact,"Minimum") == 0) { gradient->radii.x=(double) (MagickMin((image->columns-1.0), (image->rows-1.0)))/2.0; gradient->radii.y=gradient->radii.x; } } artifact=GetImageArtifact(image,"gradient:radii"); if (artifact != (const char *) NULL) (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x, &gradient->radii.y); 
gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y); gradient->spread=method; /* Define the gradient to fill between the stops. */ gradient->number_stops=number_stops; gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops, sizeof(*gradient->stops)); if (gradient->stops == (StopInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops* sizeof(*stops)); /* Draw a gradient on the image. */ status=DrawGradientImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O i l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OilPaintImage() applies a special effect filter that simulates an oil % painting. Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
% */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register size_t *histogram; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel = GetPixelChannelChannel(linear_image,i); PixelTrait traits = GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if (((paint_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(linear_image,p) <= (QuantumRange/2))) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OilPaintImage) #endif proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
  OpaquePaintImage() changes any pixel that matches color with the color
  defined by fill argument.

  By default color must match a particular pixel color exactly.  However, in
  many cases two colors may differ by a small amount.  Fuzz defines how much
  tolerance is acceptable to consider two colors as the same.  For example,
  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
  are now interpreted as the same color.

  The format of the OpaquePaintImage method is:

      MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
        const PixelInfo *fill,const MagickBooleanType invert,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.
    o target: the RGB value of the target color.
    o fill: the replacement color.
    o invert: paint any pixel that does not match the target color.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView *image_view;
  MagickBooleanType status;
  MagickOffsetType progress;
  PixelInfo conform_fill, conform_target, zero;
  ssize_t y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Map the caller's fill/target colors into this image's colorspace.
  */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo pixel;
    register Quantum *magick_restrict q;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip pixels masked out of writing. */
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* invert flips the match test: paint non-matching pixels instead. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          PixelTrait traits;

          /* Only channels with the update trait are overwritten. */
          traits=GetPixelChannelTraits(image,RedPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelRed(image,conform_fill.red,q);
          traits=GetPixelChannelTraits(image,GreenPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelGreen(image,conform_fill.green,q);
          traits=GetPixelChannelTraits(image,BluePixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlue(image,conform_fill.blue,q);
          traits=GetPixelChannelTraits(image,BlackPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlack(image,conform_fill.black,q);
          traits=GetPixelChannelTraits(image,AlphaPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelAlpha(image,conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImage)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const PixelInfo *target,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%    o target: the target color.
%    o opacity: the replacement opacity value.
%    o invert: paint any pixel that does not match the target color.
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView *image_view;
  MagickBooleanType status;
  MagickOffsetType progress;
  PixelInfo zero;
  ssize_t y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Ensure the image carries an alpha channel before painting opacity. */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo pixel;
    register ssize_t x;
    register Quantum *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for the all the channels, TransparentPaintImage()
%  is not suitable for the operations like chroma, where the tolerance for
%  similarity of two color component (RGB) can be different.  Thus we define
%  this method to take two target pixels (one low and one high) and all the
%  pixels of an image which are lying between these two pixels are made
%  transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%    o low: the low target color.
%    o high: the high target color.
%    o opacity: the replacement opacity value.
%    o invert: paint any pixel that does not match the target color.
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView *image_view;
  MagickBooleanType status;
  MagickOffsetType progress;
  ssize_t y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType match;
    PixelInfo pixel;
    register Quantum *magick_restrict q;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /*
        A pixel matches when each RGB component falls inside [low,high];
        note the alpha component is intentionally not part of the test.
      */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?
        MagickTrue : MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
solver-omp.c
/*
  1D block partition helpers: lowerb/upperb give the inclusive row range that
  worker `id` of `p` workers owns out of `n` rows, distributing the remainder
  one row at a time to the lowest-ranked workers.
*/
#define lowerb(id, p, n) ( id * (n/p) + (id < (n%p) ? id : n%p) )
#define numElem(id, p, n) ( (n/p) + (id < (n%p)) )
#define upperb(id, p, n) ( lowerb(id, p, n) + numElem(id, p, n) - 1 )

#define min(a, b) ( (a < b) ? a : b )
#define max(a, b) ( (a > b) ? a : b )

#include "omp.h"

// Copy the interior of matrix u into v, rows partitioned across threads.
void copy_mat (double *u, double *v, unsigned sizex, unsigned sizey)
{
    #pragma omp parallel
    {
        // Bug fix: omp_get_num_threads() was called OUTSIDE the parallel
        // region, where it returns 1; every thread except 0 then computed
        // an empty row range and thread 0 did all the work serially.
        // The team size must be queried inside the region.
        int numprocs = omp_get_num_threads();
        int myid = omp_get_thread_num();
        int i_start = lowerb(myid, numprocs, sizex);
        int i_end = upperb(myid, numprocs, sizex);
        for (int i=max(1, i_start); i<=min(sizex-2, i_end); i++) {
            for (int j=1; j<=sizey-2; j++)
                v[i*sizey+j] = u[i*sizey+j];
        }
    }
}

// 1D-blocked Jacobi solver: one iteration step.
// Writes the relaxed interior into utmp and returns the squared residual.
double relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey)
{
    double diff, sum=0.0;
    int howmany=omp_get_max_threads();
    #pragma omp parallel private(diff) reduction(+:sum)
    {
        // Each thread relaxes its own contiguous block of rows.
        int myid = omp_get_thread_num();
        int i_start = lowerb(myid, howmany, sizex);
        int i_end = upperb(myid, howmany, sizex);
        for (int i=max(1, i_start); i<= min(sizex-2, i_end); i++) {
            for (int j=1; j<= sizey-2; j++) {
                utmp[i*sizey+j]= 0.25 * ( u[ i*sizey     + (j-1) ]+  // left
                                          u[ i*sizey     + (j+1) ]+  // right
                                          u[ (i-1)*sizey + j     ]+  // top
                                          u[ (i+1)*sizey + j     ]); // bottom
                diff = utmp[i*sizey+j] - u[i*sizey + j];
                sum += diff * diff;
            }
        }
    }
    return sum;
}

// 2D-blocked Gauss-Seidel solver: one iteration step (in place).
// Uses OpenMP doacross ordering so each block waits for the block above it
// (sink: r-1,c) before relaxing, preserving Gauss-Seidel dependencies.
double relax_gauss (double *u, unsigned sizex, unsigned sizey)
{
    double unew, diff, sum=0.0;
    int numprocs=omp_get_max_threads();
    #pragma omp parallel for ordered(2) private(unew,diff) reduction(+:sum)
    for (int r = 0; r < numprocs; ++r) {
        for (int c = 0; c < numprocs; ++c) {
            int r_start = lowerb(r, numprocs, sizex);
            int r_end = upperb(r, numprocs, sizex);
            int c_start = lowerb(c, numprocs, sizey);
            int c_end = upperb(c, numprocs, sizey);
            #pragma omp ordered depend(sink: r-1, c)
            for (int i=max(1, r_start); i<= min(sizex-2, r_end); i++) {
                for (int j=max(1, c_start); j<= min(sizey-2,c_end); j++) {
                    unew= 0.25 * ( u[ i*sizey     + (j-1) ]+  // left
                                   u[ i*sizey     + (j+1) ]+  // right
                                   u[ (i-1)*sizey + j     ]+  // top
                                   u[ (i+1)*sizey + j     ]); // bottom
                    diff = unew - u[i*sizey+ j];
                    sum += diff * diff;
                    u[i*sizey+j]=unew;
                }
            }
            #pragma omp ordered depend(source)
        }
    }
    return sum;
}
schedule-openmp.c
/*****************************************************************************
 About: OpenMP demo showing how loop iterations are scheduled
 (static, dynamic, guided) across threads.

 export OMP_SCHEDULE=dynamic,4   //change chunksize to different values
 export OMP_NUM_THREADS=4        //change no. of threads to different values
*****************************************************************************/
#include<stdio.h>
#include<omp.h>
#include<unistd.h>

int main()
{
    #pragma omp parallel
    {
        /* Block-scoped locals are private per thread, replacing the
           explicit private(tid) clause of the original. */
        int thread_id = omp_get_thread_num();
        int iter;

        #pragma omp for schedule(runtime)
        for (iter = 0; iter < 20; iter++)
        {
            printf("i=%3d tid=%d\n", iter, thread_id);
            sleep(4);   /* slow each iteration so scheduling is observable */
        }
    }
    return 0;
}
convolution_3x3_pack8to1_fp16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 convolution, input packed 8 channels per element, output
// unpacked (pack8 -> pack1).  Kernel weights are stored as fp16 and expanded
// to __m256 via loadfp16; each output value is the horizontal reduction of
// the 9 packed multiply-accumulates.
// NOTE(review): assumes bottom_blob rows are laid out so that row(i+2) is a
// valid read for every output row i (i.e. input is already padded) — the
// caller is expected to guarantee this.
static void conv3x3s1_pack8to1_fp16_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // nn_outch is unused here; kept for symmetry with sibling kernels.
    int nn_outch = 0;
    int remain_outch_start = 0;

    // One output channel per task; each task owns its output plane.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out0.fill(bias0);

        //fp16
        const unsigned short* k0 = (const unsigned short*)kernel.channel(p);

        // Accumulate the contribution of every input channel into out0.
        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0.row(0);

            const Mat img0 = bottom_blob.channel(q);

            // Expand the nine 8-wide fp16 kernel taps once per channel.
            __m256 _k00 = loadfp16(k0);
            __m256 _k01 = loadfp16(k0 + 8);
            __m256 _k02 = loadfp16(k0 + 16);
            __m256 _k10 = loadfp16(k0 + 24);
            __m256 _k11 = loadfp16(k0 + 32);
            __m256 _k12 = loadfp16(k0 + 40);
            __m256 _k20 = loadfp16(k0 + 48);
            __m256 _k21 = loadfp16(k0 + 56);
            __m256 _k22 = loadfp16(k0 + 64);

            int i = 0;
            for (; i < outh; i++)
            {
                // Three consecutive input rows feed one output row.
                const float* r0 = img0.row(i);
                const float* r1 = img0.row(i + 1);
                const float* r2 = img0.row(i + 2);

                int j = 0;
                for (; j < outw; j++)
                {
                    __m256 _r00 = _mm256_loadu_ps(r0);
                    __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                    __m256 _r02 = _mm256_loadu_ps(r0 + 16);

                    __m256 _sum0 = _mm256_mul_ps(_k00, _r00);
                    __m256 _sum1 = _mm256_mul_ps(_k01, _r01);
                    __m256 _sum2 = _mm256_mul_ps(_k02, _r02);

                    __m256 _r10 = _mm256_loadu_ps(r1);
                    __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                    __m256 _r12 = _mm256_loadu_ps(r1 + 16);

                    _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                    _sum1 = _mm256_fmadd_ps(_k11, _r11, _sum1);
                    _sum2 = _mm256_fmadd_ps(_k12, _r12, _sum2);

                    __m256 _r20 = _mm256_loadu_ps(r2);
                    __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                    __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                    _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                    _sum1 = _mm256_fmadd_ps(_k21, _r21, _sum1);
                    _sum2 = _mm256_fmadd_ps(_k22, _r22, _sum2);

                    // Collapse the three 8-lane accumulators to one scalar.
                    __m128 _sum = HorizontalSums(_sum0, _sum1, _sum2);

                    *outptr0 += _mm_reduce_add_ps(_sum); // dot
                    outptr0++;

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                }
            }

            k0 += 9 * 8; // advance to the next input channel's 3x3x8 taps
        }
    }
}
functionCalls.c
/* NOTE(review): this file looks like a fixture for call-graph / OpenMP
   analysis — several calls are commented out and bar()/foobar() are
   mutually recursive with no base case, so the program would overflow the
   stack if actually executed.  Presumably it is only analyzed, not run —
   TODO confirm before "fixing". */

/* Calls bar(), passes an OpenMP barrier, then assigns a local.
   NOTE(review): declared int but falls off the end without a return;
   that is undefined behavior only if a caller uses the value (none do). */
int foobar()
{
    int x;
    bar(1, 0);
#pragma omp barrier
    x = 10;
}

/* Calls foobar(); both parameters are unused.
   NOTE(review): also int-declared with no return statement. */
int bar(int x, int y)
{
    foobar();
}

/* Entry helper: invokes bar() with two locals. */
int foo()
{
    int i = 10, j = 20;
    bar(i, j);
    // foobar();
}

int main()
{
    int p = 10, q = 30;
    foo();
    // bar(p, q);
    // foobar();
}
factorize_gmp.c
/*****************************************************************************************************************
* Compiling: mpicc fattor.c -lgmp -fopenmp -o fattor
* Running: mpirun -n PROCNUM --bind-to none fattor NUMBER
* Note: PROCNUM is the number of processes that will be ran, and it must be >=2, NUMBER is the number to factorize
*****************************************************************************************************************/
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <gmp.h>
#include <sys/time.h>

/* Very basic and non-reusable stack of GMP integers. */
struct elem {
    mpz_t val;
    struct elem* next;
};

/* Push a copy of val onto the stack. */
void add(struct elem** head, mpz_t val) {
    struct elem* app = malloc(sizeof(struct elem));
    mpz_init(app->val);
    mpz_set(app->val, val); // app->val = val;
    app->next = *head;
    *head = app;
}

/* Pop the top value into toret; sets toret to 0 when the stack is empty.
   Precondition: toret must already be mpz_init'ed by the caller (both call
   sites do this).
   Bug fix: the old code called mpz_init(toret) here on every pop, which
   re-initializes an already-initialized mpz_t — undefined behavior per the
   GMP manual and a memory leak in practice. */
void pick(struct elem** head, mpz_t toret) {
    struct elem* app;
    if(*head == NULL)
        mpz_set_ui(toret, 0); // toret = 0;
    else {
        mpz_set(toret, (*head)->val); // toret = (*head)->val;
        app = *head;
        *head = (*head)->next;
        mpz_clear(app->val);  /* release the popped node's GMP storage */
        free(app);
    }
}

/* Rank 0: drain factor messages from every slave until each of the
   comm_size-1 slaves has sent its zero terminator. */
void master_procedure(int comm_size) {
    int i = 1;
    int shit_happened;
    unsigned char buffer[50];
    MPI_Status stat;
    int count;
    mpz_t received_number;
    mpz_init(received_number);
    while(i < comm_size) {
        shit_happened = MPI_Recv(buffer, 50, MPI_UNSIGNED_CHAR, i, MPI_ANY_TAG, MPI_COMM_WORLD, &stat);
        /* Bug fix: check the Recv status BEFORE decoding the buffer; the
           old code imported the (possibly garbage) payload first. */
        if(shit_happened) {
            fprintf(stderr, "Recv failed");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        MPI_Get_count(&stat, MPI_UNSIGNED_CHAR, &count);
        mpz_import(received_number, count, 1, 1, 1, 0, buffer);
        if(mpz_cmp_ui(received_number, 0) == 0) // if(received_number == 0)
            ++i;
        /*else {
            char stringa[200];
            mpz_get_str(stringa, 10, received_number);
            printf("Factor: %s\n", stringa);
        }*/
    }
}

/* Slaves: each rank trial-divides its slice of [1, sqrt(the_number)],
   splitting the slice further across OpenMP threads; every factor found is
   pushed on a shared stack and finally streamed to rank 0, terminated by a
   zero value. */
void slave_procedure(int my_rank, int comm_size, mpz_t the_number) {
    int shit_happened;
    struct elem* head = NULL;
    unsigned char* buffer;
    mpz_t temp;
    mpz_t from;
    mpz_t to;
    mpz_t to_send;
    mpz_init(temp);
    mpz_init(from);
    mpz_init(to);
    mpz_init(to_send);
    mpz_root(temp, the_number, 2); // temp = sqrt(the_number);
    mpz_div_ui(temp, temp, comm_size - 1); // temp = temp / (comm_size - 1);
    mpz_mul_ui(from, temp, my_rank - 1); // from = temp * (my_rank - 1);
    mpz_mul_ui(to, temp, my_rank); // to = temp * my_rank;
    /* Never trial-divide by 0 (fix: standard if replaces the GNU `?:`
       elvis extension). */
    if(mpz_cmp_ui(from, 0) == 0)
        mpz_set_ui(from, 1);
    #pragma omp parallel shared(from, to)
    {
        int my_thread = omp_get_thread_num();
        int threads = omp_get_num_threads();
        mpz_t from_thread;
        mpz_t to_thread;
        mpz_t divided;
        mpz_init(from_thread);
        mpz_init(to_thread);
        mpz_init(divided);
        /* Carve this thread's sub-range [from_thread, to_thread] out of the
           rank's range [from, to]. */
        mpz_sub(to_thread, to, from); // to_thread = to - from;
        mpz_set(from_thread, to_thread); // from_thread = to_thread;
        mpz_div_ui(to_thread, to_thread, threads); // to_thread = to_thread / threads;
        mpz_mul_ui(to_thread, to_thread, my_thread + 1); // to_thread = to_thread * (my_thread + 1);
        mpz_div_ui(from_thread, from_thread, threads); // from_thread = from_thread / threads;
        mpz_mul_ui(from_thread, from_thread, my_thread); // from_thread = from_thread * my_thread;
        mpz_add(from_thread, from_thread, from); // from_thread = from_thread + from;
        mpz_add(to_thread, to_thread, from); // to_thread = to_thread + from;
        while(mpz_cmp(from_thread, to_thread) <= 0) {
            if(mpz_divisible_p(the_number, from_thread)) {
                mpz_divexact(divided, the_number, from_thread); // exact since the_number % from_thread == 0
                #pragma omp critical
                {
                    add(&head, from_thread);
                    add(&head, divided);
                }
            }
            mpz_add_ui(from_thread, from_thread, 1); // ++from_thread;
        }
        mpz_clear(from_thread);
        mpz_clear(to_thread);
        mpz_clear(divided);
    }
    /* Ship every stacked factor to rank 0; the final popped value is 0 and
       doubles as the termination message. */
    do {
        pick(&head, to_send);
        int how_many_bytes = (mpz_sizeinbase(to_send, 2) + 7) / 8; // How many bytes is to_send
        /* Bug fix: use calloc — mpz_export of the value 0 writes no bytes,
           so the old malloc'ed buffer sent one uninitialized byte and the
           master could miss the zero terminator and hang. */
        buffer = calloc(how_many_bytes, 1);
        mpz_export(buffer, NULL, 1, 1, 1, 0, to_send); // Export the number to buffer
        shit_happened = MPI_Send(buffer, how_many_bytes, MPI_UNSIGNED_CHAR, 0, 0, MPI_COMM_WORLD);
        if(shit_happened) {
            fprintf(stderr, "Send failed");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        free(buffer);
    } while(mpz_cmp_ui(to_send, 0));
}

/* Wall-clock time in seconds (microsecond resolution). */
double get_time() {
    struct timeval tempo;
    gettimeofday(&tempo, 0);
    return tempo.tv_sec + (tempo.tv_usec / 1000000.0);
}

struct result {
    mpz_t num;      /* the number that was factorized */
    double w_time;  /* wall time spent, in seconds */
};

/* Print "number time" on one line (gnuplot-friendly). */
void print_results(struct result r) {
    // header
    printf("# number time\n");
    char str[1024];
    mpz_get_str(str, 10, r.num);
    printf("%s %.6lf\n", str, r.w_time);
}

int main(int argc, char** argv) {
    int my_rank, comm_size;
    mpz_t the_number;
    mpz_init(the_number);
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if(argc <= 1) {
        fprintf(stderr, "Missing number as argument");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    else
        mpz_set_str(the_number, argv[1], 10); // 10 is the base
    if(my_rank == 0) {
        double t1 = get_time();
        master_procedure(comm_size);
        double t2 = get_time();
        struct result r;
        mpz_init(r.num);
        mpz_set(r.num, the_number);
        r.w_time = t2 - t1;
        print_results(r);
    }
    else
        slave_procedure(my_rank, comm_size, the_number);
    MPI_Finalize();
    return 0;
}
rawKeccak_512_fmt_plug.c
/* Keccak-512 cracker patch for JtR. Hacked together during January of 2013
 * by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2012 by Solar Designer
 * based on rawMD4_fmt.c code, with trivial changes by groszek.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawKeccak;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawKeccak);
#else

#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "KeccakF-1600-interface.h"
#include "KeccakNISTInterface.h"
#ifdef _OPENMP
#define OMP_SCALE               2048  /* keys-per-crypt multiplier per thread */
#include <omp.h>
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "Raw-Keccak"
#define FORMAT_NAME             ""
#if defined(__AVX__)
#define ALGORITHM_NAME          "AVX"
#elif defined(__XOP__)
#define ALGORITHM_NAME          "XOP"
#elif defined(__SSE4_1__)
#define ALGORITHM_NAME          "SSE4.1"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME          "SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME          "SSE2"
#else
#define ALGORITHM_NAME          "32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define CIPHERTEXT_LENGTH       128   /* hex digits: 64-byte digest */
#define BINARY_SIZE             64
#define SALT_SIZE               0
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

static struct fmt_tests tests[] = {
	{"0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e", ""},
	{"$keccak$d135bb84d0439dbac432247ee573a23ea7d3c9deb2a968eb31d47c4fb45f1ef4422d6c531b5b9bd6f449ebcc449ea94d0a8f05f62130fda612da53c79659f609", "The quick brown fox jumps over the lazy dog"},
	{"$keccak$e4a7e8f5572f4853ef26a862f31687c249b1cd7922df2aac1f4348d8ceef944c74d1949e3465704a5f3f89fb53e0dcce3ea142c90af04c84cc7e548f144f8f0b", "abcd"},
	{NULL}
};

/* Per-candidate buffers, sized in init() for max_keys_per_crypt. */
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
    [(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];

/* Scale key buffers to the OpenMP thread count and set up Keccak tables. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
	saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	KeccakInitialize();
}

/* Accept 128 hex digits, with or without the "$keccak$" prefix. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, "$keccak$", 8))
		p += 8;
	q = p;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: ensure the "$keccak$" tag and lower-case hex. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[8 + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, "$keccak$", 8))
		ciphertext += 8;

	memcpy(out, "$keccak$", 8);
	memcpy(out + 8, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + 8);
	return out;
}

/* Decode the hex digest into its 64 raw bytes. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext + 8;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Hash-table bucket selectors over the first 32 bits of the digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; }

/* Store a candidate, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_key_length[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_key_length[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

static char *get_key(int index)
{
	saved_key[index][saved_key_length[index]] = 0;
	return saved_key[index];
}

/* Hash every queued candidate with Keccak-512 (bit length = bytes * 8). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		Hash(512, (BitSequence *)saved_key[index],
				saved_key_length[index] * 8,
				(BitSequence *)crypt_out[index]);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full digest already compared in cmp_one; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_rawKeccak = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"Keccak 512 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
#if FMT_MAIN_VERSION > 9
		BINARY_ALIGN,
#endif
		SALT_SIZE,
#if FMT_MAIN_VERSION > 9
		SALT_ALIGN,
#endif
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
convolution_1x1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack fp32 1x1-convolution weights into the bf16 interleaved layout the
// sgemm kernel below consumes: output channels grouped 8-at-a-time (aarch64
// NEON only), then 4-at-a-time, then singly, with the weights of a group
// interleaved per input channel. Each stored element is the upper 16 bits
// of the fp32 value (bf16), hence the 2-byte element size in create().
static void conv1x1s1_sgemm_transform_kernel_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const float* kernel = _kernel;

    // interleave
#if __ARM_NEON && __aarch64__
    // channel count = number of 8-wide groups + leftover 4-wide groups + singles
    kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u, 1);
#else
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)2u, 1);
#endif // __ARM_NEON && __aarch64__

    int p = 0;
#if __ARM_NEON && __aarch64__
    // Pack 8 output channels per destination channel: for every input channel q,
    // store one bf16 weight from each of kernel0..kernel7 back to back.
    for (; p + 7 < outch; p += 8)
    {
        const float* kernel0 = kernel + (p + 0) * inch;
        const float* kernel1 = kernel + (p + 1) * inch;
        const float* kernel2 = kernel + (p + 2) * inch;
        const float* kernel3 = kernel + (p + 3) * inch;
        const float* kernel4 = kernel + (p + 4) * inch;
        const float* kernel5 = kernel + (p + 5) * inch;
        const float* kernel6 = kernel + (p + 6) * inch;
        const float* kernel7 = kernel + (p + 7) * inch;

        unsigned short* ktmp = kernel_tm.channel(p / 8);

        for (int q = 0; q < inch; q++)
        {
            // kernel0...7 0
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp[1] = float32_to_bfloat16(kernel1[0]);
            ktmp[2] = float32_to_bfloat16(kernel2[0]);
            ktmp[3] = float32_to_bfloat16(kernel3[0]);
            ktmp[4] = float32_to_bfloat16(kernel4[0]);
            ktmp[5] = float32_to_bfloat16(kernel5[0]);
            ktmp[6] = float32_to_bfloat16(kernel6[0]);
            ktmp[7] = float32_to_bfloat16(kernel7[0]);
            ktmp += 8;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
            kernel4 += 1;
            kernel5 += 1;
            kernel6 += 1;
            kernel7 += 1;
        }
    }
#endif // __ARM_NEON && __aarch64__
    // Pack remaining output channels 4 at a time.
    for (; p + 3 < outch; p += 4)
    {
        const float* kernel0 = kernel + (p + 0) * inch;
        const float* kernel1 = kernel + (p + 1) * inch;
        const float* kernel2 = kernel + (p + 2) * inch;
        const float* kernel3 = kernel + (p + 3) * inch;

        // destination channel index skips past the 8-wide groups on aarch64
#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* ktmp = kernel_tm.channel(p / 4);
#endif // __ARM_NEON && __aarch64__

        for (int q = 0; q < inch; q++)
        {
            // kernel0...3 0
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp[1] = float32_to_bfloat16(kernel1[0]);
            ktmp[2] = float32_to_bfloat16(kernel2[0]);
            ktmp[3] = float32_to_bfloat16(kernel3[0]);
            ktmp += 4;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
        }
    }
    // Remaining single output channels: plain per-input-channel copy.
    for (; p < outch; p++)
    {
        const float* kernel0 = kernel + p * inch;

#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
        unsigned short* ktmp = kernel_tm.channel(p / 4 + p % 4);
#endif // __ARM_NEON && __aarch64__

        for (int q = 0; q < inch; q++)
        {
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp++;
            kernel0++;
        }
    }
}

// bf16 1x1-stride-1 convolution as an interleaved GEMM. First stage below
// repacks the input feature map into `tmp` with spatial positions grouped
// 8/4/1-wide to match the packed kernel layout (function body continues
// past this chunk).
static void conv1x1s1_sgemm_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 2u, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = nn_size << 3;

        // 8-wide spatial groups: copy 8 consecutive bf16 pixels of every
        // input channel into one tmp channel.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 8;

            const unsigned short* img0 = bottom_blob.channel(0);
            img0 += i;

            unsigned short* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
#if __ARM_NEON
#if __aarch64__ vst1q_u16(tmpptr, vld1q_u16(img0)); tmpptr += 8; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.u16 {d0-d1}, [%0 :64] \n" "vst1.u16 {d0-d1}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const unsigned short* img0 = bottom_blob.channel(0); img0 += i; unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { #if __ARM_NEON #if __aarch64__ vst1_u16(tmpptr, vld1_u16(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const unsigned short* img0 = bottom_blob.channel(0); img0 += i; unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); unsigned short* outptr4 = top_blob.channel(p + 4); unsigned short* outptr5 = top_blob.channel(p + 5); unsigned short* outptr6 = top_blob.channel(p + 6); unsigned short* outptr7 = top_blob.channel(p + 7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%8], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" 
"fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4h, v9.4h}, [%8], #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, 
v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%0], #16 \n" "st1 {v18.4h, v19.4h}, [%1], #16 \n" "st1 {v20.4h, v21.4h}, [%2], #16 \n" "st1 {v22.4h, v23.4h}, [%3], #16 \n" "st1 {v24.4h, v25.4h}, [%4], #16 \n" "st1 {v26.4h, v27.4h}, [%5], #16 \n" "st1 {v28.4h, v29.4h}, [%6], #16 \n" "st1 {v30.4h, v31.4h}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", 
"v30", "v31"); } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, 
v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #64] \n" "ld1 {v8.4h}, [%8], #8 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h}, [%0], #8 \n" "st1 {v17.4h}, [%1], #8 \n" "st1 {v18.4h}, [%2], #8 \n" "st1 {v19.4h}, [%3], #8 \n" "st1 {v20.4h}, [%4], #8 \n" "st1 {v21.4h}, [%5], #8 \n" "st1 {v22.4h}, [%6], #8 \n" "st1 {v23.4h}, [%7], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v24.4s, 
v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #64] \n" "ld1 {v8.4h}, [%8], #8 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #16] \n" "ld1r {v8.4h}, [%8], #2 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "st1 {v24.h}[0],[%0], #2 \n" "st1 {v24.h}[1],[%1], #2 \n" "st1 {v24.h}[2],[%2], #2 \n" "st1 
{v24.h}[3],[%3], #2 \n" "st1 {v25.h}[0],[%4], #2 \n" "st1 {v25.h}[1],[%5], #2 \n" "st1 {v25.h}[2],[%6], #2 \n" "st1 {v25.h}[3],[%7], #2 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"); } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" 
"fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4h, v5.4h}, [%4], #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" "st1 {v10.4h, v11.4h}, [%1], #16 \n" "st1 {v12.4h, v13.4h}, [%2], #16 \n" "st1 {v14.4h, v15.4h}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 
q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.u16 {d10-d11}, [%4 :64]! \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! 
\n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d28, q14, #16 \n" "vshrn.u32 d29, q15, #16 \n" "vst1.u16 {d16-d17}, [%0 :64]! \n" "vst1.u16 {d20-d21}, [%1 :64]! \n" "vst1.u16 {d24-d25}, [%2 :64]! \n" "vst1.u16 {d28-d29}, [%3 :64]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum0_4 = biasptr[0]; float sum0_5 = biasptr[0]; float sum0_6 = biasptr[0]; float sum0_7 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum1_4 = biasptr[1]; float sum1_5 = biasptr[1]; float sum1_6 = biasptr[1]; float sum1_7 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum2_4 = biasptr[2]; float sum2_5 = biasptr[2]; float sum2_6 = biasptr[2]; float sum2_7 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; float sum3_4 = biasptr[3]; float sum3_5 = biasptr[3]; float sum3_6 = biasptr[3]; float sum3_7 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += 
bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum0_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]); sum0_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]); sum0_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]); sum0_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]); sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]); sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]); sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]); sum1_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[1]); sum1_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[1]); sum1_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[1]); sum1_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[1]); sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]); sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]); sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]); sum2_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[2]); sum2_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[2]); sum2_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[2]); sum2_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[2]); sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]); sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]); sum3_3 += 
bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]); sum3_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[3]); sum3_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[3]); sum3_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[3]); sum3_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[3]); tmpptr += 8; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0_0); outptr0[1] = float32_to_bfloat16(sum0_1); outptr0[2] = float32_to_bfloat16(sum0_2); outptr0[3] = float32_to_bfloat16(sum0_3); outptr0[4] = float32_to_bfloat16(sum0_4); outptr0[5] = float32_to_bfloat16(sum0_5); outptr0[6] = float32_to_bfloat16(sum0_6); outptr0[7] = float32_to_bfloat16(sum0_7); outptr1[0] = float32_to_bfloat16(sum1_0); outptr1[1] = float32_to_bfloat16(sum1_1); outptr1[2] = float32_to_bfloat16(sum1_2); outptr1[3] = float32_to_bfloat16(sum1_3); outptr1[4] = float32_to_bfloat16(sum1_4); outptr1[5] = float32_to_bfloat16(sum1_5); outptr1[6] = float32_to_bfloat16(sum1_6); outptr1[7] = float32_to_bfloat16(sum1_7); outptr2[0] = float32_to_bfloat16(sum2_0); outptr2[1] = float32_to_bfloat16(sum2_1); outptr2[2] = float32_to_bfloat16(sum2_2); outptr2[3] = float32_to_bfloat16(sum2_3); outptr2[4] = float32_to_bfloat16(sum2_4); outptr2[5] = float32_to_bfloat16(sum2_5); outptr2[6] = float32_to_bfloat16(sum2_6); outptr2[7] = float32_to_bfloat16(sum2_7); outptr3[0] = float32_to_bfloat16(sum3_0); outptr3[1] = float32_to_bfloat16(sum3_1); outptr3[2] = float32_to_bfloat16(sum3_2); outptr3[3] = float32_to_bfloat16(sum3_3); outptr3[4] = float32_to_bfloat16(sum3_4); outptr3[5] = float32_to_bfloat16(sum3_5); outptr3[6] = float32_to_bfloat16(sum3_6); outptr3[7] = float32_to_bfloat16(sum3_7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); 
#else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v4.4h}, [%4], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "st1 {v8.4h}, [%0], #8 \n" "st1 {v9.4h}, [%1], #8 \n" "st1 {v10.4h}, [%2], #8 \n" "st1 {v11.4h}, [%3], #8 \n" : "=r"(outptr0), // %0 
"=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #64] \n" "vld1.u16 {d9}, [%4 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d18, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d22, q11, #16 \n" "vst1.u16 {d16}, [%0 :64]! \n" "vst1.u16 {d18}, [%1 :64]! 
\n" "vst1.u16 {d20}, [%2 :64]! \n" "vst1.u16 {d22}, [%3 :64]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]); sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]); sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]); sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]); sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]); sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]); sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]); sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]); 
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]); tmpptr += 4; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0_0); outptr0[1] = float32_to_bfloat16(sum0_1); outptr0[2] = float32_to_bfloat16(sum0_2); outptr0[3] = float32_to_bfloat16(sum0_3); outptr1[0] = float32_to_bfloat16(sum1_0); outptr1[1] = float32_to_bfloat16(sum1_1); outptr1[2] = float32_to_bfloat16(sum1_2); outptr1[3] = float32_to_bfloat16(sum1_3); outptr2[0] = float32_to_bfloat16(sum2_0); outptr2[1] = float32_to_bfloat16(sum2_1); outptr2[2] = float32_to_bfloat16(sum2_2); outptr2[3] = float32_to_bfloat16(sum2_3); outptr3[0] = float32_to_bfloat16(sum3_0); outptr3[1] = float32_to_bfloat16(sum3_1); outptr3[2] = float32_to_bfloat16(sum3_2); outptr3[3] = float32_to_bfloat16(sum3_3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v4.4h}, [%4], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s 
\n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #16] \n" "ld1r {v4.4h}, [%4], #2 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v12.4h, v12.4s, #16 \n" "st1 {v12.h}[0], [%0], #2 \n" "st1 {v12.h}[1], [%1], #2 \n" "st1 {v12.h}[2], [%2], #2 \n" "st1 {v12.h}[3], [%3], #2 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"); #else // __aarch64__ asm volatile( "vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #64] \n" "vld1.u16 {d9}, [%4 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #16] \n" "vld1.u16 {d9[]}, [%4]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24[0]}, [%0]! \n" "vst1.u16 {d24[1]}, [%1]! 
\n" "vst1.u16 {d24[2]}, [%2]! \n" "vst1.u16 {d24[3]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum2 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum3 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); tmpptr++; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0); outptr1[0] = float32_to_bfloat16(sum1); outptr2[0] = float32_to_bfloat16(sum2); outptr3[0] = float32_to_bfloat16(sum3); outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; unsigned short* outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4h, v5.4h}, [%1], #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "prfm pldl1keep, [%2, #16] \n" "ld1r {v0.4h}, [%2], #2 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", 
"v8", "v9", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #256] \n" "vld1.u16 {d28-d31}, [%1 :64]! \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.u16 {d10-d11}, [%1 :64]! \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "pld [%2, #16] \n" "vld1.u16 {d1[]}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; float sum4 = bias0; float sum5 = bias0; float sum6 = bias0; float sum7 = bias0; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]); sum5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]); sum6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]); sum7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]); tmpptr += 8; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0[1] = float32_to_bfloat16(sum1); outptr0[2] = float32_to_bfloat16(sum2); outptr0[3] = float32_to_bfloat16(sum3); outptr0[4] = float32_to_bfloat16(sum4); outptr0[5] = float32_to_bfloat16(sum5); outptr0[6] = float32_to_bfloat16(sum6); outptr0[7] = float32_to_bfloat16(sum7); outptr0 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" 
"shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v4.4h}, [%1], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%2, #16] \n" "ld1r {v0.4h}, [%2], #2 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #64] \n" "vld1.u16 {d9}, [%1 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%2, #16] \n" "vld1.u16 {d1[]}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); tmpptr += 4; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0[1] = float32_to_bfloat16(sum1); outptr0[2] = float32_to_bfloat16(sum2); outptr0[3] = float32_to_bfloat16(sum3); outptr0 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3 < inch; q += 4) { float32x4_t _p0 = vcvt_f32_bf16(vld1_u16(tmpptr)); tmpptr += 4; float32x4_t _k0 = vcvt_f32_bf16(vld1_u16(kptr)); kptr += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); tmpptr++; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0++; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? 
bias[p] : 0.f; // // float* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const float* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const float* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } }
simple.c
/* * Simple program for openmp without init constructor. This tests if * the runtime library creates threads on its own. * * Copyright (c) 2019, Rice University. * See the file LICENSE for details. * * Mark W. Krentel * September 2019 */ #include <sys/types.h> #include <dlfcn.h> #include <err.h> #include <errno.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <omp.h> #define LIBM "libm.so.6" typedef double sin_fcn_t (double); static sin_fcn_t * sin_fcn = NULL; void gotcha_tool_init(void); //---------------------------------------------------------------------- double reduce(double A[], int N) { double ans; int i; ans = 0.0; #pragma omp parallel for default(none) private(i) \ shared(A, N, sin_fcn) reduction(+ : ans) for (i = 0; i < N; i++) { ans += (* sin_fcn)(A[i]); } return ans; } int main(int argc, char **argv) { printf("main: first entry\n"); gotcha_tool_init(); int i, j, N; if (argc < 2 || sscanf(argv[1], "%d", &N) < 1) { N = 1000; } printf("main: N = %d\n", N); double * A = (double *) malloc(N * sizeof(double)); if (A == NULL) { err(1, "malloc array failed"); } printf("main: calling dlopen() and sigprocmask() ...\n"); void * handle = dlopen(LIBM, RTLD_LAZY); if (handle == NULL) { err(1, "unable to dlopen(libm)"); } sin_fcn = dlsym(handle, "sin"); if (sin_fcn == NULL) { err(1, "unable to dlsym(sin)"); } #pragma omp parallel { sigset_t * set = (sigset_t *) malloc(sizeof(sigset_t)); sigemptyset(set); int ret = sigprocmask(SIG_BLOCK, set, NULL); if (ret != 0) { warn("sigprocmask() failed"); } void * handle = dlopen(LIBM, RTLD_LAZY); if (handle == NULL) { warn("dlopen() failed"); } } double ans = 0.0; for (j = 0; j < N; j++) { #pragma omp parallel for default(none) private(i) shared(A, N) for (i = 0; i < N; i++) { A[i] = (double) i; } ans = reduce(A, N); } printf("main: ans = %g\n", ans); return 0; }
GB_unop__floor_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__floor_fc32_fc32
// op(A') function:  GB_unop_tran__floor_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_cfloorf (aij)

// A's entry type (single-precision complex)
#define GB_ATYPE \
    GxB_FC32_t

// C's entry type (same as A's: no typecast is needed for this operator)
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex floor (GB_cfloorf is defined in GB.h;
// presumably floors the real and imaginary parts — confirm there)
#define GB_OP(z, x) \
    z = GB_cfloorf (x) ;

// casting (identity cast: input and output types are both GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_cfloorf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the floor operator elementwise to all anz entries of Ax,
// writing the results into Cx, with a static OpenMP parallel loop.
// Returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__floor_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,       // input values, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = GB_cfloorf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is instantiated by textual inclusion of
// GB_unop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB_unop_tran__floor_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OnDiscMSExperiment.h
// --------------------------------------------------------------------------
//                   OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2020.
//
// This software is released under a three-clause BSD license:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of any author or any participating institution
//    may be used to endorse or promote products derived from this software
//    without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------

#pragma once

#include <OpenMS/INTERFACES/DataStructures.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/KERNEL/MSSpectrum.h>
#include <OpenMS/KERNEL/MSChromatogram.h>
#include <OpenMS/METADATA/ExperimentalSettings.h>
#include <OpenMS/FORMAT/HANDLERS/IndexedMzMLHandler.h>

#include <vector>
#include <algorithm>
#include <limits>

#include <boost/shared_ptr.hpp>

namespace OpenMS
{
  /**
    @brief Representation of a mass spectrometry experiment on disk.

    @ingroup Kernel

    @note This implementation is @a not thread-safe since it keeps internally a
    single file access pointer which it moves when accessing a specific
    data item. Please provide a separate copy to each thread, e.g.

    @code
    #pragma omp parallel for firstprivate(ondisc_map)
    @endcode
  */
  class OPENMS_DLLAPI OnDiscMSExperiment
  {
    // Convenience aliases for the peak types used by the on-disk experiment.
    typedef ChromatogramPeak ChromatogramPeakT;
    typedef Peak1D PeakT;

public:

    /**
      @brief Constructor

      This initializes the object, use openFile to open a file.
    */
    OnDiscMSExperiment() {}

    /**
      @brief Open a specific file on disk.

      This tries to read the indexed mzML by parsing the index and then reading
      the meta information into memory.

      @return Whether the parsing of the file was successful (if false, the
      file most likely was not an indexed mzML file)

      @param filename Path of the indexed mzML file to open (an empty name skips meta-data loading)
      @param skipMetaData If true, do not parse the meta data into memory
    */
    bool openFile(const String& filename, bool skipMetaData = false)
    {
      filename_ = filename;
      indexed_mzml_file_.openFile(filename);
      if (filename != "" && !skipMetaData)
      {
        // read all per-spectrum / per-chromatogram meta data into memory
        loadMetaData_(filename);
      }
      return indexed_mzml_file_.getParsingSuccess();
    }

    /// Copy constructor
    /// @note the underlying file handler is copied as well; the new object
    /// operates on the same file name
    OnDiscMSExperiment(const OnDiscMSExperiment& source) :
      filename_(source.filename_),
      indexed_mzml_file_(source.indexed_mzml_file_),
      meta_ms_experiment_(source.meta_ms_experiment_)
    {
    }

    /**
      @brief Equality operator

      This only checks whether the underlying file is the same and the parsed
      meta-information is the same. Note that the file reader (e.g. the
      std::ifstream of the file) might be in a different state.
    */
    bool operator==(const OnDiscMSExperiment& rhs) const
    {
      // If either side has no meta data loaded, compare the pointers themselves
      // (equal only if both are null) together with the file names.
      if (meta_ms_experiment_ == nullptr || rhs.meta_ms_experiment_ == nullptr)
      {
        return filename_ == rhs.filename_ &&
               meta_ms_experiment_ == rhs.meta_ms_experiment_;
      }

      // check if file and meta information is the same
      return filename_ == rhs.filename_ &&
             (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_);
      // do not check if indexed_mzml_file_ is equal -> they have the same filename...
    }

    /// Inequality operator
    bool operator!=(const OnDiscMSExperiment& rhs) const
    {
      return !(operator==(rhs));
    }

    /**
      @brief Checks if all spectra are sorted with respect to ascending RT

      Note that we cannot check whether all spectra are sorted (except if we
      were to load them all and check).
    */
    bool isSortedByRT() const
    {
      // without meta data we cannot answer the question; report unsorted
      if (!meta_ms_experiment_) return false;

      return meta_ms_experiment_->isSorted(false);
    }

    /// alias for getNrSpectra
    inline Size size() const
    {
      return getNrSpectra();
    }

    /// returns whether spectra are empty
    inline bool empty() const
    {
      return indexed_mzml_file_.getNrSpectra() == 0;
    }

    /// get the total number of spectra available
    inline Size getNrSpectra() const
    {
      return indexed_mzml_file_.getNrSpectra();
    }

    /// get the total number of chromatograms available
    inline Size getNrChromatograms() const
    {
      return indexed_mzml_file_.getNrChromatograms();
    }

    /// returns the meta information of this experiment (const access)
    boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const
    {
      return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_);
    }

    /// returns the in-memory meta data of all spectra / chromatograms
    /// (may be a null pointer if openFile() was called with skipMetaData = true)
    boost::shared_ptr<PeakMap> getMetaData() const
    {
      return meta_ms_experiment_;
    }

    /// alias for getSpectrum
    inline MSSpectrum operator[](Size n)
    {
      return getSpectrum(n);
    }

    /**
      @brief returns a single spectrum

      The peak data is read from disk; if meta data was loaded, it is merged
      into the returned spectrum.

      @param id The index of the spectrum
    */
    MSSpectrum getSpectrum(Size id)
    {
      if (!meta_ms_experiment_) return indexed_mzml_file_.getMSSpectrumById(int(id));

      // start from the in-memory meta data, then fill in the peaks from disk
      MSSpectrum spectrum(meta_ms_experiment_->operator[](id));
      indexed_mzml_file_.getMSSpectrumById(int(id), spectrum);
      return spectrum;
    }

    /**
      @brief returns a single spectrum
    */
    OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id)
    {
      return indexed_mzml_file_.getSpectrumById(id);
    }

    /**
      @brief returns a single chromatogram

      The data points are read from disk; if meta data was loaded, it is merged
      into the returned chromatogram.

      @param id The index of the chromatogram
    */
    MSChromatogram getChromatogram(Size id)
    {
      if (!meta_ms_experiment_) return indexed_mzml_file_.getMSChromatogramById(int(id));

      // start from the in-memory meta data, then fill in the data from disk
      MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id));
      indexed_mzml_file_.getMSChromatogramById(int(id), chromatogram);
      return chromatogram;
    }

    /**
      @brief returns a single chromatogram

      @param id The native identifier of the chromatogram
    */
    MSChromatogram getChromatogramByNativeId(const std::string& id);

    /**
      @brief returns a single spectrum

      @param id The native identifier of the spectrum
    */
    MSSpectrum getSpectrumByNativeId(const std::string& id);

    /**
      @brief returns a single chromatogram
    */
    OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id)
    {
      return indexed_mzml_file_.getChromatogramById(id);
    }

    /// sets whether to skip some XML checks and be fast instead
    void setSkipXMLChecks(bool skip)
    {
      indexed_mzml_file_.setSkipXMLChecks(skip);
    }

private:

    /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLHandler
    OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */);

    /// Reads the meta data of all spectra / chromatograms into meta_ms_experiment_
    void loadMetaData_(const String& filename);

    /// Looks up a chromatogram's meta data by native id
    MSChromatogram getMetaChromatogramById_(const std::string& id);

    /// Looks up a spectrum's meta data by native id
    MSSpectrum getMetaSpectrumById_(const std::string& id);

protected:

    /// The filename of the underlying data file
    String filename_;
    /// The index of the underlying data file
    Internal::IndexedMzMLHandler indexed_mzml_file_;
    /// The meta-data
    boost::shared_ptr<PeakMap> meta_ms_experiment_;
    /// Mapping of chromatogram native ids to offsets
    std::unordered_map< std::string, Size > chromatograms_native_ids_;
    /// Mapping of spectra native ids to offsets
    std::unordered_map< std::string, Size > spectra_native_ids_;
  };

typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap;

} // namespace OpenMS
beta_projectors_gradient.h
/*
 * Beta_projectors_gradient.h
 *
 *  Created on: Oct 14, 2016
 *      Author: isivkov
 */

#ifndef SRC_BETA_PROJECTORS_BETA_PROJECTORS_GRADIENT_H_
#define SRC_BETA_PROJECTORS_BETA_PROJECTORS_GRADIENT_H_

#include "beta_projectors.h"

namespace sirius
{

/// Stores gradient components of beta over atomic positions d <G+k | Beta > / d Rn
class Beta_projectors_gradient
{
  protected:
    /// local array of gradient components. dimensions: 0 - gk, 1-orbitals
    std::array<matrix<double_complex>, 3> components_gk_a_;

    /// the same but for one chunk
    std::array<matrix<double_complex>, 3> chunk_comp_gk_a_;

    /// the same but for one chunk on gpu
    std::array<matrix<double_complex>, 3> chunk_comp_gk_a_gpu_;

    /// inner product store (one accumulator per Cartesian component)
    std::array<mdarray<double, 1>, 3> beta_phi_;

    /// underlying beta projectors; not owned by this class
    Beta_projectors* bp_;

  public:
    /// Computes all three Cartesian gradient components from the given beta projectors.
    Beta_projectors_gradient(Beta_projectors* bp) : bp_(bp)
    {
        for (int comp : {0, 1, 2}) {
            components_gk_a_[comp] = matrix<double_complex>(bp_->beta_gk_a().size(0), bp_->beta_gk_a().size(1));
            calc_gradient(comp);
        }

        // on GPU we create the arrays without allocation; device memory is
        // allocated later in prepare() before they are used
        #ifdef __GPU
        for (int comp : {0, 1, 2}) {
            chunk_comp_gk_a_gpu_[comp] = matrix<double_complex>(bp_->num_gkvec_loc(), bp_->max_num_beta(), memory_t::none);
        }
        #endif
    }

    /// Fills components_gk_a_[calc_component__] with -i * (G+k)_component * <G+k|beta>.
    /// NOTE(review): this is the analytic derivative of the structure-factor phase
    /// with respect to the atomic position -- confirm against Beta_projectors.
    void calc_gradient(int calc_component__)
    {
        Gvec const& gkvec = bp_->gk_vectors();

        matrix<double_complex> const& beta_comps = bp_->beta_gk_a();

        double_complex Im(0, 1);

        // each orbital column is independent -> parallelize over orbitals
        #pragma omp parallel for
        for (size_t ibf = 0; ibf < bp_->beta_gk_a().size(1); ibf++) {
            for (int igk_loc = 0; igk_loc < bp_->num_gkvec_loc(); igk_loc++) {
                // translate the local G+k index into the global one for this rank
                int igk = gkvec.gvec_offset(bp_->comm().rank()) + igk_loc;

                double gkvec_comp = gkvec.gkvec_cart(igk)[calc_component__];

                components_gk_a_[calc_component__](igk_loc, ibf) = - Im * gkvec_comp * beta_comps(igk_loc, ibf);
            }
        }
    }

    /// Creates a view of one component restricted to the beta functions of one chunk
    /// (and copies it to the device when running on GPU).
    void generate(int chunk__, int calc_component__)
    {
        if (bp_->proc_unit() == CPU) {
            // CPU: alias the chunk's columns inside components_gk_a_, no copy
            chunk_comp_gk_a_[calc_component__] = mdarray<double_complex, 2>(&components_gk_a_[calc_component__](0, bp_->beta_chunk(chunk__).offset_),
                                                                            bp_->num_gkvec_loc(),
                                                                            bp_->beta_chunk(chunk__).num_beta_);
        }
        #ifdef __GPU
        if (bp_->proc_unit() == GPU) {
            // GPU: pair the host view with the pre-created device buffer and upload
            chunk_comp_gk_a_[calc_component__] = mdarray<double_complex, 2>(&components_gk_a_[calc_component__](0, bp_->beta_chunk(chunk__).offset_),
                                                                            chunk_comp_gk_a_gpu_[calc_component__].at<GPU>(),
                                                                            bp_->num_gkvec_loc(),
                                                                            bp_->beta_chunk(chunk__).num_beta_);
            chunk_comp_gk_a_[calc_component__].copy_to_device();
        }
        #endif
    }

    /// Convenience overload: generate the chunk views for all three components.
    void generate(int chunk__)
    {
        for (int comp : {0, 1, 2}) {
            generate(chunk__, comp);
        }
    }

    /// Calculates inner product <beta_grad | Psi>.
    template <typename T>
    void inner(int chunk__, wave_functions& phi__, int idx0__, int n__, int calc_component__)
    {
        bp_->inner<T>(chunk__, phi__, idx0__, n__, chunk_comp_gk_a_[calc_component__], beta_phi_[calc_component__]);
    }

    //void inner(int chunk__, wave_functions& phi__, int idx0__, int n__, mdarray<double_complex, 2> &beta_gk, mdarray<double, 1> &beta_phi);

    /// Inner product <beta_grad | Psi> for all three components.
    template <typename T>
    void inner(int chunk__, wave_functions& phi__, int idx0__, int n__)
    {
        for (int comp : {0, 1, 2}) inner<T>(chunk__, phi__, idx0__, n__, comp);
    }

    /// Returns a matrix view of the stored inner products for one component;
    /// on GPU the view references both the host and the device copies.
    template <typename T>
    matrix<T> beta_phi(int chunk__, int n__, int calc_component__)
    {
        int nbeta = bp_->beta_chunk(chunk__).num_beta_;

        if (bp_->proc_unit() == GPU) {
            return std::move(matrix<T>(reinterpret_cast<T*>(beta_phi_[calc_component__].at<CPU>()),
                                       reinterpret_cast<T*>(beta_phi_[calc_component__].at<GPU>()),
                                       nbeta, n__));
        }
        else {
            return std::move(matrix<T>(reinterpret_cast<T*>(beta_phi_[calc_component__].at<CPU>()),
                                       nbeta, n__));
        }
    }

    /// Returns the inner-product views for all three components.
    template <typename T>
    std::array<matrix<T>, 3> beta_phi(int chunk__, int n__)
    {
        std::array<matrix<T>, 3> chunk_beta_phi;

        for (int comp : {0, 1, 2}) chunk_beta_phi[comp] = beta_phi<T>(chunk__, n__, comp);

        return std::move(chunk_beta_phi);
    }

    /// Allocates the device-side buffers (GPU builds only); call before generate()/inner().
    void prepare()
    {
        #ifdef __GPU
        if (bp_->proc_unit() == GPU) {
            for (int comp : {0, 1, 2}) {
                chunk_comp_gk_a_gpu_[comp].allocate(memory_t::device);
                beta_phi_[comp].allocate(memory_t::device);
            }
        }
        #endif
    }

    /// Releases the device-side buffers allocated by prepare() (GPU builds only).
    void dismiss()
    {
        #ifdef __GPU
        if (bp_->proc_unit() == GPU) {
            for (int comp : {0, 1, 2}) {
                chunk_comp_gk_a_gpu_[comp].deallocate_on_device();
                beta_phi_[comp].deallocate_on_device();
            }
        }
        #endif
    }
};

}

#endif /* SRC_BETA_PROJECTORS_BETA_PROJECTORS_GRADIENT_H_ */
ab-totient-omp-1.c
// Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile:    gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end

#include <stdio.h>
#include <omp.h>

/*
 Euler's totient: if n is a positive integer, then phi(n) is the number of
 integers k in the range 1 <= k <= n for which gcd(n, k) = 1.

 When the input is a prime number the totient is simply the prime number - 1,
 and phi(n) is always even (except for n = 1).
*/

/*
 Computes phi(number) by trial division, using the product formula
 phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n.
*/
long getTotient(long number)
{
    long result = number;

    // Strip out the factor 2 first so the main loop only needs odd candidates.
    if (number % 2 == 0) {
        result -= result / 2;
        do
            number /= 2;
        while (number % 2 == 0);
    }

    // Primitive replacement for a list of primes: try every odd candidate up
    // to sqrt(number). Composite candidates never divide the remaining number
    // because their prime factors were already divided out.
    long prime;
    for (prime = 3; prime * prime <= number; prime += 2) {
        if (number % prime == 0) {
            result -= result / prime;
            do
                number /= prime;
            while (number % prime == 0);
        }
    }

    // Whatever remains (> 1) is a single prime factor above sqrt(number).
    if (number > 1)
        result -= result / number;

    return result;
}

/*
 Sums phi(i) for i in [range_start .. range_end] (inclusive) in parallel and
 prints the result. Returns 0 on success, 1 on bad command-line input.
*/
int main(int argc, char **argv)
{
    long lower, upper;

    // Bug fix: validate the command line before dereferencing argv[1]/argv[2]
    // (the original crashed when run with fewer than two arguments) and check
    // the sscanf conversions actually succeeded.
    if (argc < 3 ||
        sscanf(argv[1], "%ld", &lower) != 1 ||
        sscanf(argv[2], "%ld", &upper) != 1) {
        fprintf(stderr, "usage: %s range_start range_end\n", argv[0]);
        return 1;
    }

    // Bug fix: result is a long; it was initialised with the double literals
    // 0.0 / 1.0 in the original.
    long result = 0;

    // phi(1) == 1 by definition; handle it here and start the loop at 2.
    if (lower == 1) {
        result = 1;
        lower = 2;
    }

    // Bug fixes: the loop variable was a 32-bit int iterating over a long
    // range (undefined overflow for upper > INT_MAX), and a leftover
    // num_threads(1) clause forced serial execution of the parallel loop.
    #pragma omp parallel for default(shared) schedule(auto) reduction(+:result)
    for (long i = lower; i <= upper; i++) {
        result = result + getTotient(i);
    }

    printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);

    return 0;
}
mandel_reduction.c
/*
** PROGRAM: Mandelbrot area (solution)
**
** PURPOSE: Estimates the area of the Mandelbrot set by sampling an
**          NPOINTS x NPOINTS grid over the rectangle [-2, 0.5] x [0, 1.125]
**          and counting how many sample points escape the set. The correct
**          answer should be around 1.510659.
**
** USAGE:   Program runs without input ... just run the executable
**
** ADDITIONAL EXERCISES: Experiment with the schedule clause to fix
**          the load imbalance. Experiment with atomic vs. critical vs.
**          reduction for numoutside.
**
** HISTORY: Written: (Mark Bull, August 2011).
**          Changed "comples" to "d_comples" to avoid collsion with
**          math.h complex type. Fixed data environment errors
**          (Tim Mattson, September 2011)
**          Implememted a "reduction" version (Helen He, November 2020)
*/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define NPOINTS 1000
#define MXITR 1000

struct d_complex {
    double r;
    double i;
};

int testpoint(struct d_complex);

struct d_complex c;
int numoutside = 0;

int main()
{
    double area, error;
    const double eps = 1.0e-5;

    /* Walk the grid of sample points in the complex plane that contains the
       Mandelbrot set; each escape count is folded into numoutside via the
       reduction clause, so no shared-variable clauses are needed here. */
    omp_set_num_threads(8);
#pragma omp parallel for reduction(+:numoutside)
    for (int row = 0; row < NPOINTS; row++) {
        for (int col = 0; col < NPOINTS; col++) {
            struct d_complex sample;
            sample.r = -2.0 + 2.5 * (double)(row) / (double)(NPOINTS) + eps;
            sample.i = 1.125 * (double)(col) / (double)(NPOINTS) + eps;
            numoutside += testpoint(sample);
        }
    }

    /* Area of the sampled rectangle scaled by the fraction of points inside;
       factor 2.0 accounts for the symmetric lower half of the set. */
    area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - numoutside) / (double)(NPOINTS * NPOINTS);
    error = area / (double)NPOINTS;

    printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n", area, error);
    printf("Correct answer should be around 1.510659\n");
    return 0;
}

/* Iterates z = z*z + c0 starting from z = c0. Returns 1 as soon as |z| > 2
   (the orbit escapes, so c0 is outside the set) and 0 if the orbit survives
   all MXITR iterations (c0 is considered inside the set). */
int testpoint(struct d_complex c0)
{
    double zr = c0.r;
    double zi = c0.i;

    for (int iter = 0; iter < MXITR; iter++) {
        double zr_next = (zr * zr) - (zi * zi) + c0.r;
        zi = zr * zi * 2 + c0.i;
        zr = zr_next;
        if ((zr * zr + zi * zi) > 4.0) {
            return 1;
        }
    }
    return 0;
}
core_sttmlq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zttmlq.c, normal z -> s, Fri Sep 28 17:38:25 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_ttmlq
 *
 *  Overwrites the general complex m1-by-n1 tile A1 and
 *  m2-by-n2 tile A2 with
 *
 *                                side = PlasmaLeft        side = PlasmaRight
 *    trans = PlasmaNoTrans            Q * | A1 |           | A1 A2 | * Q
 *                                         | A2 |
 *
 *    trans = PlasmaTrans          Q^T * | A1 |           | A1 A2 | * Q^T
 *                                         | A2 |
 *
 *  where Q is a complex orthogonal matrix defined as the product of k
 *  elementary reflectors
 *
 *    Q = H(k)^T . . . H(2)^T H(1)^T
 *
 *  as returned by plasma_core_sttlqt.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^T from the Left;
 *         - PlasmaRight : apply Q or Q^T from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans : Apply Q;
 *         - PlasmaTrans   : Apply Q^T.
 *
 * @param[in] m1
 *         The number of rows of the tile A1. m1 >= 0.
 *
 * @param[in] n1
 *         The number of columns of the tile A1. n1 >= 0.
 *
 * @param[in] m2
 *         The number of rows of the tile A2. m2 >= 0.
 *
 * @param[in] n2
 *         The number of columns of the tile A2. n2 >= 0.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in,out] A1
 *         On entry, the m1-by-n1 tile A1.
 *         On exit, A1 is overwritten by the application of Q.
 *
 * @param[in] lda1
 *         The leading dimension of the array A1. lda1 >= max(1,m1).
 *
 * @param[in,out] A2
 *         On entry, the m2-by-n2 tile A2.
 *         On exit, A2 is overwritten by the application of Q.
 *
 * @param[in] lda2
 *         The leading dimension of the tile A2. lda2 >= max(1,m2).
 *
 * @param[in] V
 *         The i-th row must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k, as returned by
 *         plasma_core_sttlqt in the first k rows of its array argument V.
 *
 * @param[in] ldv
 *         The leading dimension of the array V. ldv >= max(1,k).
 *
 * @param[out] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param work
 *         Auxiliary workspace array of length
 *             ldwork-by-m1  if side == PlasmaLeft
 *             ldwork-by-ib  if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,n1) if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_sttmlq(plasma_enum_t side, plasma_enum_t trans,
                       int m1, int n1, int m2, int n2, int k, int ib,
                             float *A1, int lda1,
                             float *A2, int lda2,
                       const float *V,  int ldv,
                       const float *T,  int ldt,
                             float *work, int ldwork)
{
    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    if (trans != PlasmaNoTrans && trans != PlasmaTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m1 < 0) {
        plasma_coreblas_error("illegal value of m1");
        return -3;
    }
    if (n1 < 0) {
        plasma_coreblas_error("illegal value of n1");
        return -4;
    }
    if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
        plasma_coreblas_error("illegal value of m2");
        return -5;
    }
    if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
        plasma_coreblas_error("illegal value of n2");
        return -6;
    }
    if (k < 0 ||
        (side == PlasmaLeft  && k > m1) ||
        (side == PlasmaRight && k > n1)) {
        plasma_coreblas_error("illegal value of k");
        return -7;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -8;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -9;
    }
    if (lda1 < imax(1, m1)) {
        plasma_coreblas_error("illegal value of lda1");
        return -10;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -11;
    }
    if (lda2 < imax(1, m2)) {
        plasma_coreblas_error("illegal value of lda2");
        return -12;
    }
    if (V == NULL) {
        plasma_coreblas_error("NULL V");
        return -13;
    }
    if (ldv < imax(1, k)) {
        plasma_coreblas_error("illegal value of ldv");
        return -14;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -15;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -16;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -17;
    }
    if (ldwork < imax(1, side == PlasmaLeft ? ib : n1)) {
        plasma_coreblas_error("illegal value of ldwork");
        return -18;
    }

    // quick return
    if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the ib-wide reflector blocks:
    // forward (0, ib, 2*ib, ...) or backward from the last block.
    int i1, i3;
    if (((side == PlasmaLeft)  && (trans == PlasmaNoTrans)) ||
        ((side == PlasmaRight) && (trans != PlasmaNoTrans))) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    // Q is stored as a product of transposed reflectors (see the doxygen
    // above), so the per-block kernel is called with the opposite trans.
    if (trans == PlasmaNoTrans)
        trans = PlasmaTrans;
    else
        trans = PlasmaNoTrans;

    for (int i = i1; (i > -1) && (i < k); i += i3) {
        // kb: width of the current reflector block (last block may be short).
        int kb = imin(ib, k-i);
        int ic = 0;
        int jc = 0;
        int mi  = m1;
        int mi2 = m2;
        int ni  = n1;
        int ni2 = n2;
        int l;

        // Restrict the update to the rows (left) or columns (right) of
        // A1/A2 touched by this reflector block; l is the height/width of
        // the triangular part of the pentagonal V block.
        if (side == PlasmaLeft) {
            mi  = kb; // m1 - i;
            mi2 = imin(i+kb, m2);
            l   = imin(kb, imax(0, m2-i));
            ic  = i;
        }
        else {
            ni  = kb;
            ni2 = imin(i+kb, n2);
            l   = imin(kb, imax(0, n2-i));
            jc  = i;
        }

        // Apply H or H^T.
        plasma_core_sparfb(side, trans, PlasmaForward, PlasmaRowwise,
                           mi, ni, mi2, ni2, kb, l,
                           &A1[lda1*jc+ic], lda1,
                           A2, lda2,
                           &V[i], ldv,
                           &T[ldt*i], ldt,
                           work, ldwork);
    }
    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper around plasma_core_sttmlq: declares data dependencies
// on the tiles so the runtime can schedule this update in the task graph.
void plasma_core_omp_sttmlq(plasma_enum_t side, plasma_enum_t trans,
                            int m1, int n1, int m2, int n2, int k, int ib,
                                  float *A1, int lda1,
                                  float *A2, int lda2,
                            const float *V,  int ldv,
                            const float *T,  int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n1]) \
                     depend(inout:A2[0:lda2*n2]) \
                     depend(in:V[0:ldv*n2]) \
                     depend(in:T[0:ib*k])
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            float *W = (float*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? ib : n1; // TODO: float check

            // Call the kernel.
            int info = plasma_core_sttmlq(side, trans,
                                          m1, n1, m2, n2, k, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          V, ldv,
                                          T, ldt,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_sttmlq() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
vt_otf_trc.c
/** * VampirTrace * http://www.tu-dresden.de/zih/vampirtrace * * Copyright (c) 2005-2008, ZIH, TU Dresden, Federal Republic of Germany * * Copyright (c) 1998-2005, Forschungszentrum Juelich, Juelich Supercomputing * Centre, Federal Republic of Germany * * See the file COPYING in the package base directory for details **/ #include "config.h" #include "vt_thrd.h" #include "vt_trc.h" #include "vt_otf_gen.h" #include "vt_env.h" #include "vt_iowrap.h" #include "vt_mpireg.h" #include "vt_omplock.h" #include "vt_memhook.h" #include "vt_memreg.h" #include "vt_metric.h" #include "vt_pform.h" #include "vt_error.h" #include "util/installdirs.h" #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "otf.h" #if (defined (VT_MPI) || defined (VT_OMPI)) #include "mpi.h" #include "vt_sync.h" #endif #if (defined (VT_OMPI) || defined (VT_OMP)) #include <omp.h> #define VT_MY_THREAD omp_get_thread_num() #define VT_NUM_THREADS omp_get_num_threads() extern void POMP_Init(void); #else #define VT_MY_THREAD 0 #define VT_NUM_THREADS 1 #endif /* *----------------------------------------------------------------------------- * Two simple hash tables 1: maps region groups to region group identifier * 2: maps file name to file identifier *----------------------------------------------------------------------------- */ #define HASH_TAB__RDESC 0 #define HASH_TAB__FILE 1 #define HASH_MAX 1021 typedef struct HN_rdesc { const char* rdesc; /* region group name */ uint32_t rdid; /* associated region group identifier */ struct HN_rdesc* next; } HashNode_rdesc; typedef struct HN_file { const char* fname; /* file name */ uint32_t fid; /* associated file identifier */ struct HN_file* next; } HashNode_file; static HashNode_rdesc* htab_rdesc[HASH_MAX]; static HashNode_file* htab_file[HASH_MAX]; /* * Generates hash code for region group name 'n' * Returs hash code */ static long hash_get_key(const char* n) { long h = 0; const char* p = n; while( *p ) h = 
h<<1 ^ *p++; if( h < 0 ) h = -h; return h; } static void hash_put(int t, const char* n, int i) { long id = hash_get_key(n) % HASH_MAX; if(t==HASH_TAB__RDESC) { HashNode_rdesc *add = (HashNode_rdesc*)malloc(sizeof(HashNode_rdesc)); add->rdesc = strdup(n); add->rdid = i; add->next = htab_rdesc[id]; htab_rdesc[id] = add; } else if(t==HASH_TAB__FILE) { HashNode_file *add = (HashNode_file*)malloc(sizeof(HashNode_file)); add->fname = strdup(n); add->fid = i; add->next = htab_file[id]; htab_file[id] = add; } } static void* hash_get(int t, const char* n) { long id = hash_get_key(n) % HASH_MAX; if(t==HASH_TAB__RDESC) { HashNode_rdesc *curr = htab_rdesc[id]; while ( curr ) { if ( strcmp( curr->rdesc, n ) == 0 ) { return curr; } curr = curr->next; } } else if(t==HASH_TAB__FILE) { HashNode_file *curr = htab_file[id]; while ( curr ) { if ( strcmp( curr->fname, n ) == 0 ) { return curr; } curr = curr->next; } } return NULL; } /* compiler adapter finalizer */ void (*vt_comp_finalize)(void) = NULL; /* vector of the thread objects */ static VTThrd** thrdv; #if (defined (VT_MPI) || defined (VT_OMPI)) static long my_node; static int num_nodes = 1; #endif static int my_trace = 0; static int num_traces = 1; static int init_pid = -1; static int64_t my_ltime[2] = { 0, 1 }; static int64_t my_offset[2] = { 0, 0 }; int vt_trc_regid[VT__REGID_NUM]; uint8_t vt_is_alive = 0; #if (defined (VT_MEMHOOK)) /* id of memory usage counter */ static uint32_t mem_app_alloc_cid; #endif #if (defined (VT_METR)) /* number of performance metrics */ static int num_metrics = 0; #endif static uint8_t vt_open_called = 0; static uint8_t vt_close_called = 0; /* id counter starting with 1 */ static uint32_t curid = 1; static uint8_t thrdgrp_created = 0; static uint32_t thrdgrp_cid = 10000; static uint32_t ompcollop_id = 0; static uint32_t vt_def_file_loc(uint32_t fid, uint32_t begln, uint32_t endln); static uint32_t vt_def_region_desc(const char* rdesc); static uint32_t vt_def_omp_comm(void); static void 
vt_write_uctl_file(void); static void vt_cpy_to_gdir(uint32_t tid); #if (defined (RFG)) static void vt_write_filt_file(void); #endif void vt_check_thrd_id(uint32_t tid); void vt_open() { int i_am_the_initer = 0; /* check for double initialization error */ #if (defined (VT_OMPI) || defined (VT_OMP)) # pragma omp critical { #endif if ( !vt_open_called ) { vt_open_called = 1; i_am_the_initer = 1; } #if (defined (VT_OMPI) || defined (VT_OMP)) } #endif if (vt_open_called && !i_am_the_initer) return; /* initialization specific to this platform */ vt_pform_init(); #if (defined (VT_METR)) /* initialize hardware counters */ num_metrics = vt_metric_open(); #if (defined (VT_OMPI) || defined (VT_OMP)) /* initialize thread support */ vt_metric_thread_init(omp_get_thread_num); #endif #endif /* trace file creation */ thrdv = (VTThrd**)calloc(vt_env_max_threads(), sizeof(VTThrd*)); if ( thrdv == NULL ) vt_error(); #if (defined (VT_OMPI) || defined (VT_OMP)) #pragma omp parallel { #endif thrdv[VT_MY_THREAD] = VTThrd_create(VT_MY_THREAD); VTThrd_open(thrdv[VT_MY_THREAD], VT_MY_THREAD); #if (defined (VT_OMPI) || defined (VT_OMP)) } #endif #if (defined (RFG)) { char* filter_deffile = vt_env_filter_spec(); char* groups_deffile = vt_env_groups_spec(); /* set default group name */ RFG_Regions_setDefaultGroup( thrdv[0]->rfg_regions, VT_DEF_GROUP ); if( filter_deffile ) { RFG_Regions_setFilterDefFile( thrdv[0]->rfg_regions, filter_deffile ); if( !RFG_Regions_readFilterDefFile( thrdv[0]->rfg_regions ) ) vt_error_msg("Could not read region filter specification file "); } if( groups_deffile ) { RFG_Regions_setGroupsDefFile( thrdv[0]->rfg_regions, groups_deffile ); if( !RFG_Regions_readGroupsDefFile( thrdv[0]->rfg_regions ) ) vt_error_msg("Could not read region group specification file "); } } #endif /* register function "user" */ vt_trc_regid[VT__USER] = vt_def_region("user", VT_NO_ID, VT_NO_LNO, VT_NO_LNO, VT_DEF_GROUP, VT_FUNCTION); /* register function "sync" */ vt_trc_regid[VT__SYNC] = 
vt_def_region("sync", VT_NO_ID, VT_NO_LNO, VT_NO_LNO, "VT_API", VT_FUNCTION); /* register function "flush" */ vt_trc_regid[VT__FLUSH] = vt_def_region("flush", VT_NO_ID, VT_NO_LNO, VT_NO_LNO, "VT_API", VT_FUNCTION); /* register function "stat" */ vt_trc_regid[VT__STAT] = vt_def_region("stat", VT_NO_ID, VT_NO_LNO, VT_NO_LNO, "VT_API", VT_FUNCTION); #if (defined (VT_OMPI) || defined(VT_OMP)) /* register function "PREG" */ vt_trc_regid[VT__PREG] = vt_def_region("parallel region", VT_NO_ID, VT_NO_LNO, VT_NO_LNO, "PREG", VT_FUNCTION); #endif #if (defined (VT_IOWRAP)) if (vt_env_iotrace()) { vt_iowrap_init(); VT_ENABLE_IO_TRACING(); } #endif #if (defined (VT_MEMHOOK)) /* write group name for memory counters */ if (vt_env_memtrace()) { uint32_t gid; /* GNU C MALLOC HOOKS ARE NOT THREAD-SAFE!!! */ # if (defined (VT_OMPI) || defined (VT_OMP)) vt_error_msg("Memory tracing by GNU C malloc-hooks for threaded application not yet supported"); # endif /* write counter group name */ gid = vt_def_counter_group("Memory"); /* initalize memory hooks */ vt_memhook_init(); /* register memory routines */ vt_mem_register(); /* write counter definition record for allocated memory */ mem_app_alloc_cid = vt_def_counter("MEM_APP_ALLOC", OTF_COUNTER_TYPE_ABS|OTF_COUNTER_SCOPE_NEXT, gid, "Bytes"); } #endif /* register MPI and OpenMP API routines if necessary */ #if (defined (VT_OMPI) || defined (VT_MPI)) vt_mpi_register(); #endif #if (defined (VT_OMPI) || defined (VT_OMP)) POMP_Init(); #endif atexit(vt_close); init_pid = getpid(); #if (defined (VT_METR)) { uint32_t gid; int i; /* return if no counters requested */ if ( num_metrics > 0 ) { /* write counter group name */ gid = vt_def_counter_group("Hardware"); /* write counter definition records */ for ( i = 0; i < num_metrics; i++ ) { VTGen_write_DEF_COUNTER(VTTHRD_GEN(thrdv[0]), i+1, vt_metric_name(i), OTF_COUNTER_TYPE_ACC, gid, "#"); } } } #endif vt_is_alive = 1; return; } void vt_close() { int i, nf; int i_am_the_closer = 0; /* catch vt_close 
called from child processes through atexit */ if ( init_pid != getpid() ) return; /* check for double finalization error */ #if (defined (VT_OMPI) || defined (VT_OMP)) # pragma omp critical { #endif if ( !vt_close_called ) { vt_close_called = 1; i_am_the_closer = 1; } #if (defined (VT_OMPI) || defined (VT_OMP)) } #endif if (vt_close_called && !i_am_the_closer) return; vt_is_alive = 0; #if (defined (VT_MEMHOOK)) /* finalize memory hooks if enabled */ if (vt_env_memtrace()) vt_memhook_finalize(); #endif #if (defined (VT_IOWRAP)) /* finalize I/O wrapper if enabled */ if (vt_env_iotrace()) { VT_DISABLE_IO_TRACING(); vt_iowrap_finalize(); } #endif /* finalize compiler adapter */ if (vt_comp_finalize) vt_comp_finalize(); /* close trace files */ for (i = 0; i < (int)VTThrd_get_num_thrds(); i++) VTThrd_close(thrdv[i]); /* call cleanup functions */ #if (defined (VT_OMPI) || defined (VT_OMP)) /* OpenMP locks */ vt_lock_close(); #endif /* hardware counters */ #if (defined (VT_METR)) if ( num_metrics > 0 ) vt_metric_close(); #endif /* copy per-process trace from local directory to global directory */ for(i=0; i<(int)VTThrd_get_num_thrds(); i++) vt_cpy_to_gdir(i); /* write unify control file */ vt_write_uctl_file(); #if (defined (RFG)) /* write list of regions whose call limit are reached */ vt_write_filt_file(); #endif /*- Rank 0: unify trace files -*/ if (my_trace == 0 && vt_env_do_unify()) { char* vtunify; char* filename; char* cmd; int len; vtunify = vt_installdirs_expand("${bindir}/vtunify"); if ( vtunify == NULL ) vt_error(); if ( access(vtunify, X_OK) == -1 ) vt_error_msg("Cannot execute %s", vtunify); len = strlen(vt_env_gdir()) + strlen(vt_env_fprefix()) + 32; filename = (char*)calloc(len, sizeof(char)); if ( filename == NULL ) vt_error(); /*- wait for files to be ready -*/ for (i = 0; i < num_traces; i++) { sprintf(filename, "%s/%s.%x.uctl", vt_env_gdir(), vt_env_fprefix(), i+1); vt_cntl_msg("Checking for %s ...", filename); nf = 0; while (access(filename, R_OK) != 0 
) { ++nf; /*- if file not ready in 15 sec give up -*/ if ( nf > 15 ) return; sleep(1); } } /*- do actual merge -*/ cmd = (char*)calloc(strlen(vtunify) + 16 + len, sizeof(char)); if ( cmd == NULL ) vt_error(); sprintf(cmd, "%s %d %s/%s %s %s %s %s", vtunify, num_traces, vt_env_gdir(), vt_env_fprefix(), vt_env_stat_show() ? "" : "-q", vt_env_compression() ? "" : "-c", vt_env_do_clean() ? "" : "-k", vt_env_is_verbose() ? "-v" : ""); vt_cntl_msg(cmd); system(cmd); free(vtunify); free(filename); free(cmd); } /* free temporary file names. This has to be done inside a parallel region because vt_metric_free() needs to be called by the thread itself. */ #if (defined (VT_OMPI) || defined (VT_OMP)) omp_set_num_threads(VTThrd_get_num_thrds()); #pragma omp parallel { #endif VTThrd_delete(thrdv[VT_MY_THREAD], VT_MY_THREAD); #if (defined (VT_OMPI) || defined (VT_OMP)) } #endif free(thrdv); } void vt_trace_on() { vt_check_thrd_id(VT_MY_THREAD); if ( vt_is_alive && VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) == 0 ) VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) = 1; } void vt_trace_off(uint8_t permanent) { vt_check_thrd_id(VT_MY_THREAD); if ( vt_is_alive && VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) != -1 ) { if ( permanent ) { uint64_t time; while(VTTHRD_STACK_LEVEL(thrdv[VT_MY_THREAD]) > 0) { time = vt_pform_wtime(); vt_exit(&time); } VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) = -1; } else { VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) = 0; } } } uint8_t vt_is_trace_on() { vt_check_thrd_id(VT_MY_THREAD); return ( vt_is_alive ) ? 
VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) : 0; } #if (defined (VT_MPI) || defined (VT_OMPI)) static int longcmp(const void *a, const void *b) { long x = *(long*)a; long y = *(long*)b; return x-y; } #endif void vt_mpi_init() { #if (defined (VT_MPI) || defined (VT_OMPI)) int i; PMPI_Comm_rank(MPI_COMM_WORLD, &my_trace); PMPI_Comm_size(MPI_COMM_WORLD, &num_traces); vt_error_pid(my_trace); /* notify all threads about trace id */ for(i=0; i<(int)VTThrd_get_num_thrds(); i++) VTGen_init_trc_id(VTTHRD_GEN(thrdv[i]), my_trace); /* 1. clock synchronization if necessary*/ if (num_traces > 1) { #if DISABLE_CLOCK_SYNC == 0 { uint64_t time; /* mark begin of clock synchronization */ time = vt_pform_wtime(); vt_enter(&time, vt_trc_regid[VT__SYNC]); /* measure offset */ my_offset[0] = vt_offset(&my_ltime[0], MPI_COMM_WORLD); /* mark end of clock synchronization */ time = vt_pform_wtime(); vt_exit(&time); } #endif } #endif atexit(vt_close); /* re-register to be called on exit before MPI's atexit */ } void vt_mpi_finalize() { #if (defined (VT_MPI) || defined (VT_OMPI)) long* nodeids = NULL; long lastid; int i; /* 2. 
clock synchronization if necessary */ if (num_traces > 1) { #if DISABLE_CLOCK_SYNC == 0 { uint64_t time; /* mark begin of clock synchronization */ time = vt_pform_wtime(); vt_enter(&time, vt_trc_regid[VT__SYNC]); /* measure offset */ my_offset[1] = vt_offset(&my_ltime[1], MPI_COMM_WORLD); /* mark end of clock synchronization */ time = vt_pform_wtime(); vt_exit(&time); } #endif } /* determine number of nodes */ if (my_trace == 0) { nodeids = (long*)malloc(num_traces * sizeof(long)); if ( nodeids == NULL ) vt_error(); } PMPI_Gather(&my_node, 1, MPI_LONG, nodeids, 1, MPI_LONG, 0, MPI_COMM_WORLD); if (my_trace == 0) { qsort(nodeids, num_traces, sizeof(long), longcmp); lastid=nodeids[0]; for (i=1; i<num_traces; ++i) { if ( nodeids[i] != lastid ) { lastid = nodeids[i]; num_nodes++; } } free(nodeids); } PMPI_Barrier(MPI_COMM_WORLD); #endif } void vt_init_trc_id(int my_id, int num_procs) { int i; my_trace = my_id; num_traces = num_procs; /* notify all threads about trace id */ for(i=0; i<(int)VTThrd_get_num_thrds(); i++) VTGen_init_trc_id(VTTHRD_GEN(thrdv[i]), my_trace); } /* This routine checks if the thread object for the current * thread ID has already been created or not. If not it creates * the corresponding thread object */ void vt_check_thrd_id(uint32_t tid) { #if (defined (VT_OMPI) || defined (VT_OMP)) if( !vt_is_alive ) return; if( VTThrd_get_num_thrds() < (tid+1) ) { vt_cntl_msg("Dynamic thread creation. 
Thread #%d\n", VT_MY_THREAD); thrdv[VT_MY_THREAD] = VTThrd_create(VT_MY_THREAD); VTThrd_open(thrdv[VT_MY_THREAD], VT_MY_THREAD); VTGen_init_trc_id(VTTHRD_GEN(thrdv[VT_MY_THREAD]), my_trace); } #endif } static void vt_cpy_to_gdir(uint32_t tid) { static const size_t buffer_size = 0x2000000; char* buffer; char* global_dir = vt_env_gdir(); char* file_prefix = vt_env_fprefix(); char global_name[1024]; char* local_name; char* local_prefix; char* suffix; FILE* infile; FILE* outfile; uint8_t i; buffer = (char*)calloc(buffer_size, sizeof(char)); if ( buffer == NULL ) vt_error(); local_prefix = VTGen_get_name(thrdv[tid]->gen); for(i = 0; i < 3; i++) { uint64_t bytes_read; /* get local file name */ if(i == 0) local_name = VTGen_get_defname(thrdv[tid]->gen); else if( i == 1 ) local_name = VTGen_get_eventname(thrdv[tid]->gen); else local_name = VTGen_get_statname(thrdv[tid]->gen); if ( local_name == NULL ) vt_error(); /* determine file suffix */ suffix = strchr(local_name+strlen(local_prefix)+1, '.'); if ( suffix == NULL ) vt_error(); /* build global file name */ snprintf(global_name, sizeof(global_name) - 1, "%s/%s.%x%s", global_dir, file_prefix, 65536*tid+(my_trace+1), suffix); infile = fopen(local_name, "rb"); if ( infile == NULL ) { free(local_name); continue; } outfile = fopen(global_name, "wb"); if ( outfile == NULL ) vt_error_msg("Cannot open file %s for writing", global_name); /* copy file */ while((bytes_read = fread(buffer, sizeof(char), buffer_size, infile))) fwrite(buffer, sizeof(char), bytes_read, outfile); fclose(infile); fclose(outfile); free(local_name); } free(buffer); } #if (defined(RFG)) static void vt_write_filt_file() { int i, j; for(i=0; i<(int)VTThrd_get_num_thrds(); i++) { uint32_t nrinfs = 0; RFG_RegionInfo** rinfs = NULL; /* get regions, whose call limit are reached */ RFG_Regions_getFilteredRegions(VTTHRD_RFGREGIONS(thrdv[i]), &nrinfs, &rinfs); if(nrinfs > 0) { char filename[300]; FILE* filt_file; snprintf(filename, sizeof(filename) - 1, 
"%s/%s.%x.filt", vt_env_gdir(), vt_env_fprefix(), 65536*i+(my_trace+1)); filt_file = fopen(filename, "w"); if(filt_file == NULL) vt_error_msg("Cannot open file %s", filename); fprintf(filt_file, "# list of regions, which are denied or whose call limit are reached\n"); fprintf(filt_file, "# (region:limit)\n"); /* write region names and call limits */ for(j=0; j<(int)nrinfs; j++) { fprintf(filt_file, "%s:%i\n", rinfs[j]->regionName, rinfs[j]->callLimit == 0 ? 0 : rinfs[j]->callLimit-1); } fclose(filt_file); vt_cntl_msg("Wrote list of filtered regions to file %s", filename); free(rinfs); } } } #endif /* *----------------------------------------------------------------------------- * Defining source code entities *----------------------------------------------------------------------------- */ static uint32_t vt_def_file_loc(uint32_t fid, uint32_t begln, uint32_t endln) { uint32_t sid; vt_check_thrd_id(VT_MY_THREAD); if( fid == VT_NO_ID || begln == VT_NO_LNO ) return 0; sid = curid++; VTGen_write_DEF_SCL(VTTHRD_GEN(thrdv[VT_MY_THREAD]), sid, fid, begln); return sid; } static uint32_t vt_def_region_desc(const char* rdesc) { uint32_t rdid; HashNode_rdesc* hn; vt_check_thrd_id(VT_MY_THREAD); hn = hash_get(HASH_TAB__RDESC, rdesc); if(hn == NULL) { rdid = curid++; VTGen_write_DEF_FUNCTION_GROUP(VTTHRD_GEN(thrdv[VT_MY_THREAD]), rdid, rdesc); hash_put(HASH_TAB__RDESC, rdesc, rdid); } else { rdid = hn->rdid; } return rdid; } static uint32_t vt_def_omp_comm() { uint32_t cid; vt_check_thrd_id(VT_MY_THREAD); cid = thrdgrp_cid; if(VT_MY_THREAD == 0 && thrdgrp_created == 0) { uint32_t cgrpc; uint32_t* cgrpv; int i; cgrpc = VT_NUM_THREADS; cgrpv = (uint32_t*)calloc(cgrpc * 8, sizeof(uint32_t)); if( cgrpv == NULL ) vt_error(); for(i = 0; i < (int)cgrpc; i++) cgrpv[i] = 65536*i+(my_trace+1); VTGen_write_DEF_PROCESS_GROUP(VTTHRD_GEN(thrdv[VT_MY_THREAD]), cid+1, "__OMP_TEAM__", cgrpc, cgrpv); free(cgrpv); thrdgrp_created = 1; } return cid; } static void vt_write_uctl_file() { int i; 
char filename[300]; FILE* uctl_file; snprintf(filename, sizeof(filename) - 1, "%s/%s.%x.uctl", vt_env_gdir(), vt_env_fprefix(), my_trace+1); uctl_file = fopen(filename, "w"); if(uctl_file == NULL) vt_error_msg("Cannot open file %s", filename); /* write namestubs */ for(i=0; i<(int)VTThrd_get_num_thrds(); i++) { fprintf(uctl_file, "%s%u", i>0 ? ":" : "", 65536*i+(my_trace+1)); } fprintf(uctl_file, "\n"); /* write time offset */ fprintf(uctl_file, "%lli:%lli:%lli:%lli\n", (long long int)my_ltime[0], (long long int)my_offset[0], (long long int)my_ltime[1], (long long int)my_offset[1]); fclose(uctl_file); vt_cntl_msg("Wrote unify control file %s", filename); } void vt_def_comment(const char* comment) { vt_check_thrd_id(VT_MY_THREAD); VTGen_write_DEFINITION_COMMENT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), comment); } uint32_t vt_def_file(const char* fname) { uint32_t fid; HashNode_file* hn; vt_check_thrd_id(VT_MY_THREAD); hn = hash_get(HASH_TAB__FILE, fname); if( hn == NULL ) { fid = curid++; VTGen_write_DEF_SCL_FILE(VTTHRD_GEN(thrdv[VT_MY_THREAD]), fid, fname); hash_put(HASH_TAB__FILE, fname, fid); } else { fid = hn->fid; } return fid; } uint32_t vt_def_fileio_group(const char* gname) { uint32_t gid; vt_check_thrd_id(VT_MY_THREAD); gid = curid++; VTGen_write_DEF_FILE_GROUP(VTTHRD_GEN(thrdv[VT_MY_THREAD]), gid, gname); return gid; } uint32_t vt_def_fileio(const char* fname, uint32_t gid) { uint32_t fid; vt_check_thrd_id(VT_MY_THREAD); fid = curid++; VTGen_write_DEF_FILE(VTTHRD_GEN(thrdv[VT_MY_THREAD]), fid, fname, gid); return fid; } uint32_t vt_def_region(const char* rname, uint32_t fid, uint32_t begln, uint32_t endln, const char* rdesc, uint8_t rtype) { uint32_t sid; uint32_t rid; uint32_t rdid; vt_check_thrd_id(VT_MY_THREAD); sid = vt_def_file_loc(fid, begln, endln); rid = curid++; if((rtype == VT_OMP_BARRIER || rtype == VT_OMP_IBARRIER)) { if(ompcollop_id == 0) { VTGen_write_DEF_COLLECTIVE_OPERATION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), rid, "OMP_Barrier", 
OTF_COLLECTIVE_TYPE_BARRIER); ompcollop_id = rid; } #if (defined (RFG)) { RFG_RegionInfo* rinf = RFG_Regions_add(VTTHRD_RFGREGIONS(thrdv[0]), rname, rid); if(rinf == NULL) vt_error(); RFG_Regions_addGroupAssign(VTTHRD_RFGREGIONS(thrdv[0]), "OMP-SYNC", 1, rname); } #endif rdid = vt_def_region_desc("OMP-SYNC"); } else if(rtype == VT_FUNCTION_COLL_OTHER || rtype == VT_FUNCTION_COLL_BARRIER || rtype == VT_FUNCTION_COLL_ONE2ALL || rtype == VT_FUNCTION_COLL_ALL2ONE || rtype == VT_FUNCTION_COLL_ALL2ALL) { VTGen_write_DEF_COLLECTIVE_OPERATION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), rid, /* collective id equal region id */ rname, /* collective name equal region name */ (rtype == VT_FUNCTION_COLL_OTHER) ? OTF_COLLECTIVE_TYPE_UNKNOWN : (rtype == VT_FUNCTION_COLL_BARRIER) ? OTF_COLLECTIVE_TYPE_BARRIER : (rtype == VT_FUNCTION_COLL_ONE2ALL) ? OTF_COLLECTIVE_TYPE_ONE2ALL : (rtype == VT_FUNCTION_COLL_ALL2ONE) ? OTF_COLLECTIVE_TYPE_ALL2ONE : (rtype == VT_FUNCTION_COLL_ALL2ALL) ? OTF_COLLECTIVE_TYPE_ALL2ALL : OTF_COLLECTIVE_TYPE_UNKNOWN); #if (defined (RFG)) { RFG_RegionInfo* rinf = RFG_Regions_add(VTTHRD_RFGREGIONS(thrdv[0]), rname, rid); if(rinf == NULL) vt_error(); RFG_Regions_addGroupAssign(VTTHRD_RFGREGIONS(thrdv[0]), "MPI", 1, rname); } #endif rdid = vt_def_region_desc(rdesc); } else { #if (defined (RFG)) { RFG_RegionInfo* rinf = RFG_Regions_add(VTTHRD_RFGREGIONS(thrdv[0]), rname, rid); if(rinf == NULL) vt_error(); if(strcmp(rdesc, VT_DEF_GROUP) != 0) { RFG_Regions_addGroupAssign(VTTHRD_RFGREGIONS(thrdv[0]), rdesc, 1, rname); rdid = vt_def_region_desc(rdesc); } else { rdid = vt_def_region_desc(rinf->groupName); } } #else rdid = vt_def_region_desc(rdesc); #endif } VTGen_write_DEF_FUNCTION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), rid, rname, rdid, sid); return rid; } uint32_t vt_def_counter_group(const char* gname) { uint32_t gid; vt_check_thrd_id(VT_MY_THREAD); gid = curid++; VTGen_write_DEF_COUNTER_GROUP(VTTHRD_GEN(thrdv[VT_MY_THREAD]), gid, gname); return gid; } uint32_t 
vt_def_counter(const char* cname, uint32_t cprop, uint32_t gid, const char* cunit) { uint32_t cid = 0; vt_check_thrd_id(VT_MY_THREAD); #if defined (VT_METR) cid = num_metrics; #endif cid += curid++; VTGen_write_DEF_COUNTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), cid, cname, cprop, gid, cunit); return cid; } void vt_def_mpi_comm(uint32_t cid, uint32_t grpc, uint8_t grpv[]) { int i; uint32_t cgrpc; uint32_t* cgrpv; char cname[20]; vt_check_thrd_id(VT_MY_THREAD); cgrpv = (uint32_t*)calloc(grpc * 8, sizeof(uint32_t)); if( cgrpv == NULL ) vt_error(); for(cgrpc = 0, i = 0; i < (int)grpc; i++) { if(grpv[i] & 0x1) cgrpv[cgrpc++] = (i * 8) + 1; if(grpv[i] & 0x2) cgrpv[cgrpc++] = (i * 8) + 2; if(grpv[i] & 0x4) cgrpv[cgrpc++] = (i * 8) + 3; if(grpv[i] & 0x8) cgrpv[cgrpc++] = (i * 8) + 4; if(grpv[i] & 0x10) cgrpv[cgrpc++] = (i * 8) + 5; if(grpv[i] & 0x20) cgrpv[cgrpc++] = (i * 8) + 6; if(grpv[i] & 0x40) cgrpv[cgrpc++] = (i * 8) + 7; if(grpv[i] & 0x80) cgrpv[cgrpc++] = (i * 8) + 8; } if(cid == 0) strncpy(cname, "__MPI_COMM_WORLD__", sizeof(cname) - 1); else if(cid == 1) strncpy(cname, "__MPI_COMM_SELF__", sizeof(cname) - 1); else strncpy(cname, "__MPI_COMM_USER__", sizeof(cname) - 1); VTGen_write_DEF_PROCESS_GROUP(VTTHRD_GEN(thrdv[VT_MY_THREAD]), cid+1, cname, cgrpc, cgrpv); free(cgrpv); } /* *----------------------------------------------------------------------------- * Recording events *----------------------------------------------------------------------------- */ /* -- Region -- */ void vt_enter(uint64_t* time, uint32_t rid) { int8_t trace; vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) == -1) return; VTTHRD_STACK_PUSH(thrdv[VT_MY_THREAD]); trace = VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]); #if (defined (RFG)) { RFG_RegionInfo* rinf; if( !RFG_Regions_stackPush(VTTHRD_RFGREGIONS(thrdv[VT_MY_THREAD]), rid, trace, &rinf) ) { # if (defined (VT_OMPI) || defined (VT_OMP)) RFG_RegionInfo* rinf_master = RFG_Regions_get(VTTHRD_RFGREGIONS(thrdv[0]), rid); if 
(rinf_master == NULL) vt_error(); rinf = RFG_Regions_add(VTTHRD_RFGREGIONS(thrdv[VT_MY_THREAD]), rinf_master->regionName, rid); /* copy master's call limit */ rinf->callLimit = rinf_master->callLimit; /* initialize call limit count down */ rinf->callLimitCD = rinf->callLimit; if (!RFG_Regions_stackPush(VTTHRD_RFGREGIONS(thrdv[VT_MY_THREAD]), rid, trace, &rinf)) vt_error(); # else vt_error(); # endif } if(rinf->callLimitCD == 0) trace = 0; } #endif if (trace) { # if defined(VT_METR) if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_ENTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, rid, 0, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_ENTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, rid, 0, 0, NULL); } # else VTGen_write_ENTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, rid, 0, 0, NULL); # endif } } void vt_exit(uint64_t* time) { uint8_t trace; vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) == -1) return; VTTHRD_STACK_POP(thrdv[VT_MY_THREAD]); trace = VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]); #if (defined (RFG)) { RFG_RegionInfo* rinf; int climitbyenter; if(!RFG_Regions_stackPop(VTTHRD_RFGREGIONS(thrdv[VT_MY_THREAD]), &rinf, &climitbyenter)) { vt_error(); } if(climitbyenter == 0) trace = 0; } #endif if ( trace ) { # if defined(VT_METR) if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_LEAVE(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, 0, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_LEAVE(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, 0, 0, NULL); } # else VTGen_write_LEAVE(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, 0, 0, NULL); # endif } } /* -- File I/O -- */ void vt_ioexit(uint64_t* time, uint64_t* etime, uint32_t fid, uint64_t hid, uint32_t op, uint64_t bytes) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; 
VTGen_write_FILE_OPERATION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, etime, fid, hid, op, bytes, 0); vt_exit(etime); } /* -- Memory -- */ #if (defined (VT_MEMHOOK)) void vt_mem_alloc(uint64_t* time, uint64_t bytes) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD]) += bytes; VTGen_write_COUNTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, mem_app_alloc_cid, VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD])); } void vt_mem_free(uint64_t* time, uint64_t bytes) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; if( bytes <= VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD]) ) VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD]) -= bytes; else VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD]) = 0; VTGen_write_COUNTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, mem_app_alloc_cid, VTTHRD_MEM_APP_ALLOC(thrdv[VT_MY_THREAD])); } #endif /* -- Counter -- */ void vt_count(uint64_t* time, uint32_t cid, uint64_t cval) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTGen_write_COUNTER(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, cid, cval); } /* -- Comment -- */ void vt_comment(uint64_t* time, const char* comment) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTGen_write_COMMENT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, comment); } /* -- MPI-1 -- */ void vt_mpi_send(uint64_t* time, uint32_t dpid, uint32_t cid, uint32_t tag, uint32_t sent) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTGen_write_SEND_MSG(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, dpid+1, cid+1, tag, sent, 0); } void vt_mpi_recv(uint64_t* time, uint32_t spid, uint32_t cid, uint32_t tag, uint32_t recvd) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTGen_write_RECV_MSG(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, spid+1, cid+1, tag, recvd, 0); } void vt_mpi_collexit(uint64_t* time, 
uint64_t* etime, uint32_t rid, uint32_t rpid, uint32_t cid, uint32_t sent, uint32_t recvd) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; VTGen_write_COLLECTIVE_OPERATION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, etime, rid, cid+1, rpid != VT_NO_ID ? rpid+1 : 0, sent, recvd, 0); vt_exit(etime); } /* -- OpenMP -- */ void vt_omp_fork(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if (defined (VT_OMPI) || defined (VT_OMP)) if (!omp_in_parallel()) VTGen_write_OMP_FORK(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time); #endif } void vt_omp_join(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if (defined (VT_OMPI) || defined (VT_OMP)) if (!omp_in_parallel()) VTGen_write_OMP_JOIN(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time); #endif } void vt_omp_alock(uint64_t* time, uint32_t lkid) { #if 0 vt_check_thrd_id(VT_MY_THREAD); VTGen_write_OMP_ALOCK(VTTHRD_GEN(thrdv[VT_MY_THREAD]), lkid); #endif } void vt_omp_rlock(uint64_t* time, uint32_t lkid) { #if 0 vt_check_thrd_id(VT_MY_THREAD); VTGen_write_OMP_RLOCK(VTTHRD_GEN(thrdv[VT_MY_THREAD]), lkid); #endif } void vt_omp_collenter(uint64_t* time, uint32_t rid) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; vt_enter(time, rid); /* store timestamp of beginning for vt_omp_collexit() to calculate collop. 
duration */ VTTHRD_OMP_COLLOP_STIME(thrdv[VT_MY_THREAD]) = *time; } void vt_omp_collexit(uint64_t* etime) { uint32_t cid; uint64_t time; vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; cid = vt_def_omp_comm(); time = VTTHRD_OMP_COLLOP_STIME(thrdv[VT_MY_THREAD]); VTGen_write_COLLECTIVE_OPERATION(VTTHRD_GEN(thrdv[VT_MY_THREAD]), &time, etime, ompcollop_id, cid+1, 0, 0, 0, 0); vt_exit(etime); } void vt_omp_parallel_begin(void) { #if defined(VT_METR) if ( NULL == thrdv[VT_MY_THREAD]->metv && vt_metric_num() > 0 ) { /* create metrics in worker threads */ vt_cntl_msg("restarting counters in thread %d",VT_MY_THREAD); thrdv[VT_MY_THREAD]->metv = vt_metric_create(); } #endif } void vt_omp_parallel_end(void) { #if defined(VT_METR) if ( VT_MY_THREAD > 0 && vt_metric_num() > 0) { /* shut down metrics in worker threads */ vt_metric_free(thrdv[VT_MY_THREAD]->metv); thrdv[VT_MY_THREAD]->metv=NULL; vt_metric_thread_fini(); } #endif } /* -- VampirTrace Internal -- */ void vt_enter_user(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; vt_enter(time, vt_trc_regid[VT__USER]); } void vt_exit_user(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; vt_exit(time); } void vt_enter_stat(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if defined(VT_METR) if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_ENTER_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_ENTER_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); } #else VTGen_write_ENTER_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); #endif } void vt_exit_stat(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if defined(VT_METR) 
if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_EXIT_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_EXIT_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); } #else VTGen_write_EXIT_STAT(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); #endif } void vt_enter_flush(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if defined(VT_METR) if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_ENTER_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_ENTER_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); } #else VTGen_write_ENTER_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); #endif } void vt_exit_flush(uint64_t* time) { vt_check_thrd_id(VT_MY_THREAD); if (VTTHRD_IS_TRACE_ON(thrdv[VT_MY_THREAD]) < 1) return; #if defined(VT_METR) if ( num_metrics > 0 ) { vt_metric_read(VTTHRD_METV(thrdv[VT_MY_THREAD]), VTTHRD_VALV(thrdv[VT_MY_THREAD])); VTGen_write_EXIT_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, num_metrics, VTTHRD_VALV(thrdv[VT_MY_THREAD])); } else { VTGen_write_EXIT_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); } #else VTGen_write_EXIT_FLUSH(VTTHRD_GEN(thrdv[VT_MY_THREAD]), time, 0, NULL); #endif }
__clang_cuda_cmath.h
/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------=== * * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. * See https://llvm.org/LICENSE.txt for license information. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception * *===-----------------------------------------------------------------------=== */ #ifndef __CLANG_CUDA_CMATH_H__ #define __CLANG_CUDA_CMATH_H__ #ifndef __CUDA__ #error "This file is for CUDA compilation only." #endif #ifndef __OPENMP_NVPTX__ #include <limits> #endif // CUDA lets us use various std math functions on the device side. This file // works in concert with __clang_cuda_math_forward_declares.h to make this work. // // Specifically, the forward-declares header declares __device__ overloads for // these functions in the global namespace, then pulls them into namespace std // with 'using' statements. Then this file implements those functions, after // their implementations have been pulled in. // // It's important that we declare the functions in the global namespace and pull // them into namespace std with using statements, as opposed to simply declaring // these functions in namespace std, because our device functions need to // overload the standard library functions, which may be declared in the global // namespace or in std, depending on the degree of conformance of the stdlib // implementation. Declaring in the global namespace and pulling into namespace // std covers all of the known knowns. 
#ifdef __OPENMP_NVPTX__ #define __DEVICE__ static constexpr __attribute__((always_inline, nothrow)) #else #define __DEVICE__ static __device__ __inline__ __attribute__((always_inline)) #endif __DEVICE__ long long abs(long long __n) { return ::llabs(__n); } __DEVICE__ long abs(long __n) { return ::labs(__n); } __DEVICE__ float abs(float __x) { return ::fabsf(__x); } __DEVICE__ double abs(double __x) { return ::fabs(__x); } __DEVICE__ float acos(float __x) { return ::acosf(__x); } __DEVICE__ float asin(float __x) { return ::asinf(__x); } __DEVICE__ float atan(float __x) { return ::atanf(__x); } __DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); } __DEVICE__ float ceil(float __x) { return ::ceilf(__x); } __DEVICE__ float cos(float __x) { return ::cosf(__x); } __DEVICE__ float cosh(float __x) { return ::coshf(__x); } __DEVICE__ float exp(float __x) { return ::expf(__x); } __DEVICE__ float fabs(float __x) { return ::fabsf(__x); } __DEVICE__ float floor(float __x) { return ::floorf(__x); } __DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); } #ifdef fpclassify #undef fpclassify #endif __DEVICE__ int fpclassify(float __x) { return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO, __x); } __DEVICE__ int fpclassify(double __x) { return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO, __x); } __DEVICE__ float frexp(float __arg, int *__exp) { return ::frexpf(__arg, __exp); } // For inscrutable reasons, the CUDA headers define these functions for us on // Windows. #if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__) // For OpenMP we work around some old system headers that have non-conforming // `isinf(float)` and `isnan(float)` implementations that return an `int`. We do // this by providing two versions of these functions, differing only in the // return type. To avoid conflicting definitions we disable implicit base // function generation. 
That means we will end up with two specializations, one // per type, but only one has a base function defined by the system header. #if defined(__OPENMP_NVPTX__) #pragma omp begin declare variant match( \ implementation = {extension(disable_implicit_base)}) // FIXME: We lack an extension to customize the mangling of the variants, e.g., // add a suffix. This means we would clash with the names of the variants // (note that we do not create implicit base functions here). To avoid // this clash we add a new trait to some of them that is always true // (this is LLVM after all ;)). It will only influence the mangled name // of the variants inside the inner region and avoid the clash. #pragma omp begin declare variant match(implementation = {vendor(llvm)}) __DEVICE__ int isinf(float __x) { return ::__isinff(__x); } __DEVICE__ int isinf(double __x) { return ::__isinf(__x); } #ifdef isfinite #undef isfininte #endif __DEVICE__ int isfinite(float __x) { return ::__finitef(__x); } __DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); } #ifdef isnan #undef isnan #endif __DEVICE__ int isnan(float __x) { return ::__isnanf(__x); } __DEVICE__ int isnan(double __x) { return ::__isnan(__x); } #pragma omp end declare variant #endif #ifdef isinf #undef isinf #endif __DEVICE__ bool isinf(float __x) { return ::__isinff(__x); } __DEVICE__ bool isinf(double __x) { return ::__isinf(__x); } #ifdef isfinite #undef isfinite #endif __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); } // For inscrutable reasons, __finite(), the double-precision version of // __finitef, does not exist when compiling for MacOS. __isfinited is available // everywhere and is just as good. 
__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); } #ifdef isnan #undef isnan #endif __DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); } __DEVICE__ bool isnan(double __x) { return ::__isnan(__x); } #if defined(__OPENMP_NVPTX__) #pragma omp end declare variant #endif #endif #ifdef isgreater #undef isgreater #endif __DEVICE__ bool isgreater(float __x, float __y) { return __builtin_isgreater(__x, __y); } __DEVICE__ bool isgreater(double __x, double __y) { return __builtin_isgreater(__x, __y); } #ifdef isgreaterequal #undef isgreaterequal #endif __DEVICE__ bool isgreaterequal(float __x, float __y) { return __builtin_isgreaterequal(__x, __y); } __DEVICE__ bool isgreaterequal(double __x, double __y) { return __builtin_isgreaterequal(__x, __y); } #ifdef isless #undef isless #endif __DEVICE__ bool isless(float __x, float __y) { return __builtin_isless(__x, __y); } __DEVICE__ bool isless(double __x, double __y) { return __builtin_isless(__x, __y); } #ifdef islessequal #undef islessequal #endif __DEVICE__ bool islessequal(float __x, float __y) { return __builtin_islessequal(__x, __y); } __DEVICE__ bool islessequal(double __x, double __y) { return __builtin_islessequal(__x, __y); } #ifdef islessgreater #undef islessgreater #endif __DEVICE__ bool islessgreater(float __x, float __y) { return __builtin_islessgreater(__x, __y); } __DEVICE__ bool islessgreater(double __x, double __y) { return __builtin_islessgreater(__x, __y); } #ifdef isnormal #undef isnormal #endif __DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); } __DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); } #ifdef isunordered #undef isunordered #endif __DEVICE__ bool isunordered(float __x, float __y) { return __builtin_isunordered(__x, __y); } __DEVICE__ bool isunordered(double __x, double __y) { return __builtin_isunordered(__x, __y); } __DEVICE__ float ldexp(float __arg, int __exp) { return ::ldexpf(__arg, __exp); } __DEVICE__ float log(float __x) { 
return ::logf(__x); } __DEVICE__ float log10(float __x) { return ::log10f(__x); } __DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); } __DEVICE__ float pow(float __base, float __exp) { return ::powf(__base, __exp); } __DEVICE__ float pow(float __base, int __iexp) { return ::powif(__base, __iexp); } __DEVICE__ double pow(double __base, int __iexp) { return ::powi(__base, __iexp); } #ifdef signbit #undef signbit #endif __DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); } __DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); } __DEVICE__ float sin(float __x) { return ::sinf(__x); } __DEVICE__ float sinh(float __x) { return ::sinhf(__x); } __DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); } __DEVICE__ float tan(float __x) { return ::tanf(__x); } __DEVICE__ float tanh(float __x) { return ::tanhf(__x); } // There was a redefinition error for this this overload in CUDA mode. // We restrict it to OpenMP mode for now, that is where it is actually needed // anyway. #ifdef __OPENMP_NVPTX__ __DEVICE__ float remquo(float __n, float __d, int *__q) { return ::remquof(__n, __d, __q); } #endif // Notably missing above is nexttoward. We omit it because // libdevice doesn't provide an implementation, and we don't want to be in the // business of implementing tricky libm functions in this header. #ifndef __OPENMP_NVPTX__ // Now we've defined everything we promised we'd define in // __clang_cuda_math_forward_declares.h. We need to do two additional things to // fix up our math functions. // // 1) Define __device__ overloads for e.g. sin(int). The CUDA headers define // only sin(float) and sin(double), which means that e.g. sin(0) is // ambiguous. // // 2) Pull the __device__ overloads of "foobarf" math functions into namespace // std. These are defined in the CUDA headers in the global namespace, // independent of everything else we've done here. // We can't use std::enable_if, because we want to be pre-C++11 compatible. 
// NOTE(review): this chunk starts mid-file; the sentence below continues a
// comment whose beginning is above this excerpt.
// ... But we go ahead and unconditionally define functions that are only
// available when compiling for C++11 to match the behavior of the CUDA
// headers.

// Minimal SFINAE helper (a stand-in for std::enable_if), spelled with
// implementation-reserved names so it cannot collide with user code.
// The primary template has no ::type member; only the <true, T> partial
// specialization provides one, which disables overloads whose condition
// is false.
template<bool __B, class __T = void> struct __clang_cuda_enable_if {};

template <class __T> struct __clang_cuda_enable_if<true, __T> {
  typedef __T type;
};

// Defines an overload of __fn that accepts one integral argument, calls
// __fn((double)x), and returns __retty.
#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn)                      \
  template <typename __T>                                                      \
  __DEVICE__                                                                   \
      typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,    \
                                      __retty>::type                           \
      __fn(__T __x) {                                                          \
    return ::__fn((double)__x);                                                \
  }

// Defines an overload of __fn that accepts one two arithmetic arguments, calls
// __fn((double)x, (double)y), and returns a double.
//
// Note this is different from OVERLOAD_1, which generates an overload that
// accepts only *integral* arguments.
#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn)                      \
  template <typename __T1, typename __T2>                                      \
  __DEVICE__ typename __clang_cuda_enable_if<                                  \
      std::numeric_limits<__T1>::is_specialized &&                             \
          std::numeric_limits<__T2>::is_specialized,                           \
      __retty>::type                                                           \
  __fn(__T1 __x, __T2 __y) {                                                   \
    return __fn((double)__x, (double)__y);                                     \
  }

// Stamp out the integer-argument overloads for the whole <cmath> surface.
// (The stray trailing semicolons after some invocations are harmless: each
// macro expansion ends in a function definition, so the extra ';' is an
// empty declaration.)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)
__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);

#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2

// Overloads for functions that don't match the patterns expected by
// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2} (extra parameters such as out-pointers
// or an exponent argument are forwarded untouched; only the arithmetic
// arguments are promoted to double).
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ typename __clang_cuda_enable_if<
    std::numeric_limits<__T1>::is_specialized &&
        std::numeric_limits<__T2>::is_specialized &&
        std::numeric_limits<__T3>::is_specialized,
    double>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
  return std::fma((double)__x, (double)__y, (double)__z);
}

template <typename __T>
__DEVICE__
    typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
                                    double>::type
    frexp(__T __x, int *__exp) {
  return std::frexp((double)__x, __exp);
}

template <typename __T>
__DEVICE__
    typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
                                    double>::type
    ldexp(__T __x, int __exp) {
  return std::ldexp((double)__x, __exp);
}

template <typename __T1, typename __T2>
__DEVICE__ typename __clang_cuda_enable_if<
    std::numeric_limits<__T1>::is_specialized &&
        std::numeric_limits<__T2>::is_specialized,
    double>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
  return std::remquo((double)__x, (double)__y, __quo);
}

template <typename __T>
__DEVICE__
    typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
                                    double>::type
    scalbln(__T __x, long __exp) {
  return std::scalbln((double)__x, __exp);
}

template <typename __T>
__DEVICE__
    typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
                                    double>::type
    scalbn(__T __x, int __exp) {
  return std::scalbn((double)__x, __exp);
}

// We need to define these overloads in exactly the namespace our standard
// library uses (including the right inline namespace), otherwise they won't be
// picked up by other functions in the standard library (e.g. functions in
// <complex>).  Thus the ugliness below: open std via libc++'s or libstdc++'s
// own namespace macros when available.
#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_BEGIN_NAMESPACE_STD
#else
namespace std {
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
#endif

// Pull the new overloads we defined above into namespace std.
using ::acos;
using ::acosh;
using ::asin;
using ::asinh;
using ::atan;
using ::atan2;
using ::atanh;
using ::cbrt;
using ::ceil;
using ::copysign;
using ::cos;
using ::cosh;
using ::erf;
using ::erfc;
using ::exp;
using ::exp2;
using ::expm1;
using ::fabs;
using ::fdim;
using ::floor;
using ::fma;
using ::fmax;
using ::fmin;
using ::fmod;
using ::fpclassify;
using ::frexp;
using ::hypot;
using ::ilogb;
using ::isfinite;
using ::isgreater;
using ::isgreaterequal;
using ::isless;
using ::islessequal;
using ::islessgreater;
using ::isnormal;
using ::isunordered;
using ::ldexp;
using ::lgamma;
using ::llrint;
using ::llround;
using ::log;
using ::log10;
using ::log1p;
using ::log2;
using ::logb;
using ::lrint;
using ::lround;
using ::nearbyint;
using ::nextafter;
using ::pow;
using ::remainder;
using ::remquo;
using ::rint;
using ::round;
using ::scalbln;
using ::scalbn;
using ::signbit;
using ::sin;
using ::sinh;
using ::sqrt;
using ::tan;
using ::tanh;
using ::tgamma;
using ::trunc;

// Well this is fun: We need to pull these symbols in for libc++, but we can't
// pull them in with libstdc++, because its ::isinf and ::isnan are different
// than its std::isinf and std::isnan.
#ifndef __GLIBCXX__
using ::isinf;
using ::isnan;
#endif

// Finally, pull the "foobarf" functions that CUDA defines in its headers into
// namespace std.
using ::acosf;
using ::acoshf;
using ::asinf;
using ::asinhf;
using ::atan2f;
using ::atanf;
using ::atanhf;
using ::cbrtf;
using ::ceilf;
using ::copysignf;
using ::cosf;
using ::coshf;
using ::erfcf;
using ::erff;
using ::exp2f;
using ::expf;
using ::expm1f;
using ::fabsf;
using ::fdimf;
using ::floorf;
using ::fmaf;
using ::fmaxf;
using ::fminf;
using ::fmodf;
using ::frexpf;
using ::hypotf;
using ::ilogbf;
using ::ldexpf;
using ::lgammaf;
using ::llrintf;
using ::llroundf;
using ::log10f;
using ::log1pf;
using ::log2f;
using ::logbf;
using ::logf;
using ::lrintf;
using ::lroundf;
using ::modff;
using ::nearbyintf;
using ::nextafterf;
using ::powf;
using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
using ::scalblnf;
using ::scalbnf;
using ::sinf;
using ::sinhf;
using ::sqrtf;
using ::tanf;
using ::tanhf;
using ::tgammaf;
using ::truncf;

// Close namespace std with the matching end-macro for whichever standard
// library opened it above.
#ifdef _LIBCPP_END_NAMESPACE_STD
_LIBCPP_END_NAMESPACE_STD
#else
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_END_NAMESPACE_VERSION
#endif
} // namespace std
#endif

#endif // __OPENMP_NVPTX__

#undef __DEVICE__

#endif
cycle_share.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1998-2018 Bernard Parent Copyright 2020 Minindu Weerakoon Copyright 2001 Giovanni Fusina Copyright 2002 Thomas E. Schwartzentruber Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <cycle/share/cycle_share.h> #include <src/data.h> #include <src/common.h> #include <src/bdry.h> #include <src/init.h> #include <cycle/ts/_ts.h> #include <cycle/tsemf/_tsemf.h> #include <cycle/_cycle.h> #include <cycle/res/_res.h> #include <cycle/resconv/_resconv.h> #include <cycle/restime/_restime.h> #include <model/fluid/_fluid.h> #include <model/emfield/_emfield.h> #include <model/metrics/_metrics.h> #include <model/fluid/_fluid.h> #ifdef OPENMPTHREADS #define maxloopthread LONG_MAX #define maxzonethread LONG_MAX #else #define maxloopthread 256 #define maxzonethread 256 #endif #define MAXRATIO_DTAUMAX_DTAUMIN 100.0 typedef struct { np_t *np; gl_t *gl; long theta,ls,le; } segment_t; typedef struct { np_t *np; gl_t *gl; long theta,ls,le; void (*funct)(np_t *, gl_t *, long, long, long); } segmentarg_t; typedef struct { np_t *np; gl_t *gl; zone_t zone; void (*funct)(np_t *, gl_t *, zone_t); } threadzone_t; void *segmentfunct(void *segmentarg){ (((segmentarg_t *) segmentarg)->funct)( ((segmentarg_t *) segmentarg)->np, ((segmentarg_t *) segmentarg)->gl, ((segmentarg_t *) segmentarg)->theta, ((segmentarg_t *) segmentarg)->ls, ((segmentarg_t *) segmentarg)->le); return(NULL); } void find_musclvarscycle(np_t np, gl_t *gl, musclvarscycle_t musclvars){ find_musclvars(np,gl,musclvars); #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS long flux; // for (flux=0; flux<nf; flux++) musclvars[nf+flux]=musclvars[flux]; for (flux=0; flux<nf; flux++) musclvars[nf+flux]=np.bs->trapezoidalm1[flux]; #endif } static void execute_function_on_all_segments(segmentarg_t *segmentarg, long numsegment, int SEGMENTWORK){ if ( #if !defined(POSIXTHREADS) && !defined(OPENMPTHREADS) TRUE #else (SEGMENTWORK==SEGMENTWORK_LIGHT && segmentarg[0].gl->NOSHORTTHREADS) #endif ){ long cnt; for (cnt=0; cnt<numsegment; cnt++){ segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le); } } else { #ifdef POSIXTHREADS long cnt; void *retval; 
pthread_t *pthread; pthread=(pthread_t *)malloc((numsegment+3)*sizeof(pthread_t)); for (cnt=0; cnt<numsegment; cnt++){ if (pthread_create(&((pthread)[cnt]), NULL, segmentfunct, (void *)(&(segmentarg[cnt])))) fatal_error("Cannot create thread."); } for (cnt=0; cnt<numsegment; cnt++){ if (pthread_join(pthread[cnt],&retval)) fatal_error("Cannot join thread %ld.",cnt); } free(pthread); #endif #ifdef OPENMPTHREADS long cnt; #pragma omp parallel for private(cnt) schedule(dynamic) for (cnt=0; cnt<numsegment; cnt++){ segmentarg[cnt].funct(segmentarg[cnt].np,segmentarg[cnt].gl,segmentarg[cnt].theta,segmentarg[cnt].ls,segmentarg[cnt].le); } #endif } } static void create_segments(np_t *np, gl_t *gl, long theta, long ls, long le, void funct(np_t *, gl_t *, long, long, long), segmentarg_t *segmentarg, long *cntsegment, bool COUNTFLAG, int TYPELEVEL, bool is_node_valid_local(np_t, int)){ long l,lm1,ls_local,le_local; bool INSIDE; l=ls; ls_local=ls; /* only needed to avoid compiler warning */ INSIDE=FALSE; do { lm1=l; l=_l_plus_one(l,gl,theta); if ((!INSIDE) && (is_node_valid_local(np[l],TYPELEVEL))) { ls_local=lm1; INSIDE=TRUE; } if ((INSIDE) && ((!is_node_valid_local(np[l],TYPELEVEL)) || (l==le))){ le_local=l; if (!COUNTFLAG) { segmentarg[*cntsegment].np=np; segmentarg[*cntsegment].gl=gl; segmentarg[*cntsegment].theta=theta; segmentarg[*cntsegment].ls=_l_plus_one(ls_local,gl,theta); segmentarg[*cntsegment].le=_l_minus_one(le_local,gl,theta); segmentarg[*cntsegment].funct=funct; } (*cntsegment)++; INSIDE=FALSE; } } while (l!=le); if (INSIDE) fatal_error("Problem setting up segments."); } void sweep_with_1D_segments(np_t *np, gl_t *gl, zone_t zone, void funct(np_t *, gl_t *, long, long, long), int sweeptype, int TYPELEVEL, bool is_node_valid_local(np_t, int), int SEGMENTWORK, int GRIDLEVEL){ long j,k,cntsegment,numthread; ifn1D( long i; ) segmentarg_t *segmentarg; int cnt; bool COUNTFLAG; numthread=0; assert(is_zone_in_zone(zone,gl->domain_all)); segmentarg=(segmentarg_t 
*)malloc(sizeof(segmentarg_t)); /* do this loop twice: the first time just to count.. */ for (cnt=0; cnt<2; cnt++){ if (cnt==0) COUNTFLAG=TRUE; else COUNTFLAG=FALSE; if (!COUNTFLAG) segmentarg=(segmentarg_t *)realloc(segmentarg,numthread*sizeof(segmentarg_t)); /* the first dimension loop */ if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_I) { cntsegment=0; for_2DL(j,zone.js,zone.je){ if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){ for_3DL(k,zone.ks,zone.ke){ if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){ create_segments(np,gl,0,_ai(gl,zone.is-1,j,k),_ai(gl,zone.ie+1,j,k), funct, segmentarg,&cntsegment, (bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } /* the second dimension loop */ #ifdef _2DL if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_J) { cntsegment=0; for_1DL(i,zone.is,zone.ie){ if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){ for_3DL(k,zone.ks,zone.ke){ if (mod(k-gl->domain_all.ks,GRIDLEVEL)==0){ create_segments(np,gl,1,_ai(gl,i,zone.js-1,k),_ai(gl,i,zone.je+1,k), funct, segmentarg,&cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } #endif /* the third dimension loop */ #ifdef _3DL if (sweeptype==SWEEPTYPE_IJK || sweeptype==SWEEPTYPE_K) { cntsegment=0; for_1DL(i,zone.is,zone.ie){ if (mod(i-gl->domain_all.is,GRIDLEVEL)==0){ for_2DL(j,zone.js,zone.je){ if (mod(j-gl->domain_all.js,GRIDLEVEL)==0){ 
create_segments(np,gl,2,_ai(gl,i,j,zone.ks-1),_ai(gl,i,j,zone.ke+1), funct, segmentarg, &cntsegment,(bool)COUNTFLAG, TYPELEVEL,is_node_valid_local); if (cntsegment>=maxloopthread) { numthread=max(numthread,cntsegment); if (!COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); cntsegment=0; } } } } } if (cntsegment>0 && !COUNTFLAG) execute_function_on_all_segments(segmentarg,cntsegment,SEGMENTWORK); numthread=max(numthread,cntsegment); } #endif } free(segmentarg); } /* the following first sets the offset to 0, then 1, then -1 */ static long _node_offset_from_cnt(long cnt){ long offset; offset=0; if (cnt==0) offset=0; if (cnt==1) offset=1; if (cnt==2) offset=-1; return(offset); } void update_bdry_node(np_t *np, gl_t *gl, long l){ long dim,dimsgn,l_C,l_B,l_A,l_D; bool BDRYDIRECFOUND; #ifdef _2DL long offset1,offset2,cnt1,cnt2; #endif #ifdef _3D long offset3,cnt3; #endif bool UPDATED; assert(is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)); UPDATED=FALSE; BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_FLUID_WORK, &dim, &dimsgn); if (is_node_link(np[l],TYPELEVEL_FLUID_WORK)) { // in case the boundary node is a link, U has already been updated: simply update the prim variables find_prim_fluid(np, l, gl); UPDATED=TRUE; } if (BDRYDIRECFOUND && !UPDATED){ l_A=l; l_B=_al(gl,l,dim,dimsgn); l_C=_al(gl,l,dim,dimsgn*2); if (is_node_inner(np[_al(gl,l,dim,dimsgn*3)],TYPELEVEL_FLUID_WORK)) l_D=_al(gl,l,dim,dimsgn*3); else l_D=l_C; assert(is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK)); assert(is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK)); update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } /* now, do the corners */ if (!UPDATED) { #ifdef _2D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); l_C=_all(gl,l,0,offset1*2,1,offset2*2); l_B=_all(gl,l,0,offset1,1,offset2); l_A=l; l_D=l_C; if ( 
is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } #endif #ifdef _3D for (cnt1=0; cnt1<=2; cnt1++){ for (cnt2=0; cnt2<=2; cnt2++){ for (cnt3=0; cnt3<=2; cnt3++){ offset1=_node_offset_from_cnt(cnt1); offset2=_node_offset_from_cnt(cnt2); offset3=_node_offset_from_cnt(cnt3); l_C=_al(gl, _al(gl, _al(gl,l,0,offset1*2), 1,offset2*2), 2,offset3*2); l_B=_al(gl, _al(gl, _al(gl,l,0,offset1), 1,offset2), 2,offset3); l_A=l; l_D=l_C; if ( is_node_inner(np[l_B],TYPELEVEL_FLUID_WORK) && is_node_inner(np[l_C],TYPELEVEL_FLUID_WORK) && !UPDATED){ update_bdry_fluid(np,gl,l_A,l_B,l_C,l_D,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_FLUID_WORK); UPDATED=TRUE; } } } } #endif } if (!UPDATED) { fatal_error("Problem updating boundary node in update_bdry_node() function."); } } void update_bdry_nodes_on_segment(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_bdry(np[l],TYPELEVEL_FLUID_WORK)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); update_bdry_node(np, gl, l); thread_lock_node_unset(np,l,THREADTYPE_ZONE); } } } void update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment, SWEEPTYPE_I, TYPELEVEL_FLUID_WORK,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); } #ifdef DISTMPI #define numfluidvars (nf+1+max(0,hbw_resconv_fluid-1)*nmc) #define numlinkvars ((hbw_resconv_fluid-1)*nmc) #define DOUBLE_INT_MAX 100000000000000 typedef double sendvars_t[max(nfe,numfluidvars)]; typedef struct { sendvars_t vars; int proc; long l; bool SENT; } sendnode_t; void update_linked_nodes_2(np_t *np, gl_t *gl, int TYPELEVEL){ int rankrecv,numproc,ranksend,thisrank; long i,j,k; zone_t zonesend,zonerecv,zone; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, 
&numproc); /* here we need to mpi the linkmusclvars */ for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { // if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) printf("x"); if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ assert(np[_ai(gl,i,j,k)].numlinkmusclvars!=0); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Send(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,rankrecv,0,MPI_COMM_WORLD); MPI_Send(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } } if (rankrecv==thisrank) { if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)){ MPI_Recv(&np[_ai(gl,i,j,k)].numlinkmusclvars,1,MPI_INT,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); assert(np[_ai(gl,i,j,k)].linkmusclvars!=NULL); MPI_Recv(np[_ai(gl,i,j,k)].linkmusclvars,numlinkvars,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); } } } } } } } MPI_Barrier(MPI_COMM_WORLD); } void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){ long i,j,k,l1,l2,flux,offset,l,cntlink; MPI_Status MPI_Status1; musclvarscycle_t musclvars; sendvars_t mpivars; int thisrank,numproc,rank2,rank1,thisproc; int packsize,buffersize,bbuffersize; double *buffer,*bbuffer; sendnode_t *sendnode; long numsendvars,numvars,numsend,cntsend,cnt; double *sendvars; int *recvproc; int cntproc; zone_t zone; zone=gl->domain; switch (TYPELEVEL){ case TYPELEVEL_FLUID: numvars=numfluidvars; break; case TYPELEVEL_FLUID_WORK: numvars=numfluidvars; break; #ifdef EMFIELD case TYPELEVEL_EMFIELD: numvars=nfe; break; #endif default: 
fatal_error("TYPELEVEL can not be set to %d.\n",TYPELEVEL); numvars=0; } sendnode=(sendnode_t *)malloc(sizeof(sendnode_t)); sendvars=(double *)malloc(sizeof(double)); cntsend=0; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Pack_size( 1, MPI_DOUBLE, MPI_COMM_WORLD, &packsize ); recvproc=(int *)malloc((numproc+2)*sizeof(int)); buffersize = min(INT_MAX,nmc*(zone.ie-zone.is)*(zone.je-zone.js)if3DL(*(zone.ke-zone.ks)) * (MPI_BSEND_OVERHEAD + packsize)); buffer = (double *)malloc( buffersize ); MPI_Buffer_attach( buffer, buffersize ); for_ijk(zone,is,js,ks,ie,je,ke){ np[_al(gl,i,j,k)].numlinkmusclvars=0; } /* first send the packets */ cntsend=0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL)){ #ifdef _CYCLE_MULTIZONE fatal_error("Linked nodes can not be used with Multizone cycle yet. Need to update update_linked_nodes() function."); #endif #ifdef _CYCLE_MULTIZONE_MARCHING fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. 
Need to update update_linked_nodes() function."); #endif if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL)){ for (cntlink=0; cntlink<_num_node_link(np[_ai(gl,i,j,k)],TYPELEVEL); cntlink++){ l1=_ai_all(gl,i,j,k); l2=_node_link(np[_ai(gl,i,j,k)],cntlink,TYPELEVEL); rank1=_node_rank(gl, i, j, k); rank2=_node_rank(gl, _i_all(l2,gl,0), _i_all(l2,gl,1), _i_all(l2,gl,2)); if (rank1==thisrank) { if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->U[flux]; mpivars[nf]=(double)_nodes_between_link_and_bdry_limited(np, gl, _l_from_l_all(gl,l1), l2, TYPELEVEL, max(0,hbw_resconv_fluid-1)); for (offset=1; offset<hbw_resconv_fluid; offset++) { // find_prim_fluid(np, _al_link(np, gl, _l_from_l_all(gl,l1), offset, TYPELEVEL), gl); find_musclvarscycle(np[_al_link(np, gl, _l_from_l_all(gl,l1), l2, offset, TYPELEVEL)], gl, musclvars); for (flux=0; flux<nmc; flux++) mpivars[1+flux+nf+(offset-1)*nmc]=musclvars[flux]; } if (rank1!=rank2){ for (flux=0; flux<numvars; flux++) sendnode[cntsend].vars[flux]=mpivars[flux]; sendnode[cntsend].proc=(int)rank2; sendnode[cntsend].l=l2; sendnode[cntsend].SENT=FALSE; cntsend++; sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t)); } else { /* no need to send with MPI*/ //printf("\n --(%ld,%ld,%ld) %d",i,j,k,thisrank); l=_l_from_l_all(gl,l2); for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux]; assert(np[l].linkmusclvars!=NULL); assert(is_node_bdry(np[l],TYPELEVEL)); assert(is_node_link(np[l],TYPELEVEL)); np[l].numlinkmusclvars=(short)round(mpivars[nf]); for (offset=1; offset<hbw_resconv_fluid; offset++) { for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc]; } } } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<numvars; flux++) mpivars[flux]=np[_l_from_l_all(gl,l1)].bs->Uemfield[flux]; if (rank1!=rank2) { for (flux=0; flux<numvars; flux++) 
sendnode[cntsend].vars[flux]=mpivars[flux]; sendnode[cntsend].proc=(int)rank2; sendnode[cntsend].l=l2; sendnode[cntsend].SENT=FALSE; cntsend++; sendnode=(sendnode_t *)realloc(sendnode,(cntsend+1)*sizeof(sendnode_t)); } else { /* no need to send with MPI */ for (flux=0; flux<nfe; flux++) np[_l_from_l_all(gl,l2)].bs->Uemfield[flux]=mpivars[flux]; } } #endif } } } } } numsend=cntsend; /* send nodes in block one proc at a time */ do { thisproc=-1; numsendvars=0; for (cntsend=0; cntsend<numsend; cntsend++){ if (thisproc==-1 && !sendnode[cntsend].SENT) thisproc=sendnode[cntsend].proc; if (sendnode[cntsend].proc==thisproc){ assert(!sendnode[cntsend].SENT); sendvars=(double *)realloc(sendvars,(numsendvars+2*numvars)*sizeof(double)); for (flux=0; flux<numvars; flux++) sendvars[numsendvars+flux]=sendnode[cntsend].vars[flux]; numsendvars+=numvars; #ifndef NDEBUG sendvars[numsendvars]=(double)mod(sendnode[cntsend].l,DOUBLE_INT_MAX); numsendvars++; #endif sendnode[cntsend].SENT=TRUE; } } if (thisproc!=-1){ if (MPI_Bsend(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes()."); if (MPI_Bsend(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("Problem with MPI_Bsend in update_linked_nodes()."); } } while (thisproc!=-1); for (cnt=0; cnt<(numproc+2); cnt++){ recvproc[cnt]=-1; } for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){ l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL); rank2=_node_rank(gl, i, j, k); rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2)); if (rank1!=rank2 && rank2==thisrank){ /* rank1 is one process that we will need to get data from; store it in recvproc */ cntproc=0; while(recvproc[cntproc]!=-1 && recvproc[cntproc]!=rank1 ) { cntproc++; } assert(cntproc<numproc); recvproc[cntproc]=rank1; } } } cntproc=0; while (recvproc[cntproc]!=-1) { thisproc=recvproc[cntproc]; 
MPI_Recv(&numsendvars,1,MPI_LONG,thisproc,0,MPI_COMM_WORLD,&MPI_Status1); sendvars=(double *)realloc(sendvars,numsendvars*sizeof(double)); MPI_Recv(sendvars,numsendvars,MPI_DOUBLE,thisproc,0,MPI_COMM_WORLD,&MPI_Status1); cntsend=0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_link(np[_ai(gl,i,j,k)],TYPELEVEL) && is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)){ l2=_ai_all(gl,i,j,k); assert(is_node_bdry(np[_ai(gl,i,j,k)],TYPELEVEL)); l1=_node_link(np[_ai(gl,i,j,k)],0,TYPELEVEL); rank2=_node_rank(gl, i, j, k); rank1=_node_rank(gl, _i_all(l1,gl,0), _i_all(l1,gl,1), _i_all(l1,gl,2)); if (rank1!=rank2 && rank2==thisrank){ if (thisproc==rank1){ for (flux=0; flux<numvars; flux++) mpivars[flux]=sendvars[cntsend+flux]; cntsend+=numvars; #ifndef NDEBUG assert(mod(l2,DOUBLE_INT_MAX)==(long)sendvars[cntsend]); cntsend++; #endif l=_l_from_l_all(gl,l2); assert(is_node_bdry(np[l],TYPELEVEL)); assert(is_node_link(np[l],TYPELEVEL)); if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) np[l].bs->U[flux]=mpivars[flux]; assert(np[l].linkmusclvars!=NULL); np[l].numlinkmusclvars=(short)round(mpivars[nf]); for (offset=1; offset<hbw_resconv_fluid; offset++) { for (flux=0; flux<nmc; flux++) np[l].linkmusclvars[flux+(offset-1)*nmc]=mpivars[1+flux+nf+(offset-1)*nmc]; } } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<nfe; flux++) np[l].bs->Uemfield[flux]=mpivars[flux]; } #endif } } } } cntproc++; } MPI_Buffer_detach( &bbuffer, &bbuffersize ); free(buffer); MPI_Barrier(MPI_COMM_WORLD); free(sendnode); free(recvproc); free(sendvars); update_linked_nodes_2(np, gl, TYPELEVEL); } #else//DISTMPI void update_linked_nodes(np_t *np, gl_t *gl, int TYPELEVEL){ long i,j,k,l1,l2,flux; for_ijk(gl->domain,is,js,ks,ie,je,ke){ l1=_ai(gl,i,j,k); if (is_node_bdry(np[l1],TYPELEVEL) && is_node_link(np[l1],TYPELEVEL)){ #ifdef _CYCLE_MULTIZONE fatal_error("Linked nodes can not be used with Multizone cycle yet. 
Need to update update_linked_nodes() function."); #endif #ifdef _CYCLE_MULTIZONE_MARCHING fatal_error("Linked nodes can not be used with MultizoneMarching cycle yet. Need to update update_linked_nodes() function."); #endif assert(is_node_bdry(np[l1],TYPELEVEL)); l2=_node_link(np[l1],0,TYPELEVEL); if (TYPELEVEL==TYPELEVEL_FLUID_WORK || TYPELEVEL==TYPELEVEL_FLUID){ for (flux=0; flux<nf; flux++) np[l1].bs->U[flux]=np[l2].bs->U[flux]; } #ifdef EMFIELD if (TYPELEVEL==TYPELEVEL_EMFIELD){ for (flux=0; flux<nfe; flux++) np[l1].bs->Uemfield[flux]=np[l2].bs->Uemfield[flux]; } #endif } } } #endif//DISTMPI static bool is_node_in_region(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){ bool tmp; tmp=FUNCT(gl,i,j,k); return(tmp); } static bool is_node_in_region_extended_by_bb(bool(*FUNCT)(gl_t *, long, long, long), gl_t *gl, long i, long j, long k){ bool tmp; long cnti,cntj,cntk; tmp=FALSE; for_1DL(cnti,i-hbw_bdry_fluid,i+hbw_bdry_fluid){ for_2DL(cntj,j-hbw_bdry_fluid,j+hbw_bdry_fluid){ for_3DL(cntk,k-hbw_bdry_fluid,k+hbw_bdry_fluid){ if (FUNCT(gl,cnti,cntj,cntk)) tmp=TRUE; } } } return(tmp); } void resume_nodes_specified_in_function(np_t *np, gl_t *gl, bool(*FUNCT)(gl_t *, long, long, long)){ long i,j,k; long *noderes; long *bdryres; long numnoderes,numbdryres,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long)); bdryres=(long *)malloc((gl->domain.ie-gl->domain.is+4)if2DL(*(gl->domain.je-gl->domain.js+4)) if3DL(*(gl->domain.ke-gl->domain.ks+4))*sizeof(long)); numnoderes=0; numbdryres=0; for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region_extended_by_bb(FUNCT,gl,i,j,k)) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { if (is_node_in_region(FUNCT,gl,i,j,k) ){ bdryres[numbdryres]=_ai(gl,i,j,k); numbdryres++; } 
noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } /* rebuild the working variables of the boundary nodes of the nodes resumed*/ for (cnt=0; cnt<numbdryres; cnt++){ if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) { update_bdry_node(np,gl,bdryres[cnt]); } } /* suspend all nodes needed only to compute the boundary nodes. this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain,is-1,js-1,ks-1,ie+1,je+1,ke+1){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_region(FUNCT,gl,i,j,k))) suspend_node(&(np[_ai(gl,i,j,k)])); } free(noderes); free(bdryres); } void resume_nodes_only_in_zone_and_update_bdry_nodes(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long *bdryres; long numnoderes,numbdryres,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); bdryres=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; numbdryres=0; for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && (i>=zone.is-hbw_bdry_fluid) && (i<=zone.ie+hbw_bdry_fluid) if2DL(&& (j>=zone.js-hbw_bdry_fluid) && (j<=zone.je+hbw_bdry_fluid)) if3DL(&& (k>=zone.ks-hbw_bdry_fluid) && (k<=zone.ke+hbw_bdry_fluid))) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { if (is_node_in_zone(i,j,k,zone)){ bdryres[numbdryres]=_ai(gl,i,j,k); numbdryres++; } noderes[numnoderes]=_ai(gl,i,j,k); 
numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_inner(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); /* rebuild the working variables of the boundary nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numbdryres; cnt++){ if (is_node_resumed(np[bdryres[cnt]]) && is_node_bdry(np[bdryres[cnt]],TYPELEVEL_FLUID)) { find_ijk_from_l(gl, bdryres[cnt], &i, &j, &k); if (is_node_in_zone(i, j, k, gl->domain)){ update_bdry_node(np,gl,bdryres[cnt]); } else { find_prim_fluid(np,bdryres[cnt],gl); } } } free(bdryres); /* suspend all nodes needed only to compute the boundary nodes. this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone))) suspend_node(&(np[_ai(gl,i,j,k)])); } } void resume_nodes_in_zone(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long numnoderes,cnt; zone_t zoneint; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; zoneint=_zone_intersection(gl->domain_lim,zone); for_ijk(zoneint,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; 
cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); } void resume_nodes_only_in_zone(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; long *noderes; long numnoderes,cnt; copy_base_to_work_node_type(np,gl,gl->domain_lim); noderes=(long *)malloc((gl->domain_lim.ie-gl->domain_lim.is+1) if2DL(*(gl->domain_lim.je-gl->domain_lim.js+1)) if3DL(*(gl->domain_lim.ke-gl->domain_lim.ks+1))*sizeof(long)); numnoderes=0; for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && (i>=zone.is) && (i<=zone.ie) if2DL(&& (j>=zone.js) && (j<=zone.je)) if3DL(&& (k>=zone.ks) && (k<=zone.ke))) { if (resume_node(&(np[_ai(gl,i,j,k)])) ) { noderes[numnoderes]=_ai(gl,i,j,k); numnoderes++; } } else { suspend_node(&(np[_ai(gl,i,j,k)])); } } /* rebuild the working variables of the inner nodes of the nodes resumed*/ #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<numnoderes; cnt++){ if (is_node_resumed(np[noderes[cnt]]) && is_node_valid(np[noderes[cnt]],TYPELEVEL_FLUID)){ find_prim_fluid(np,noderes[cnt],gl); } } free(noderes); /* suspend all nodes needed only to compute the boundary nodes. 
this is necessary to ensure that all non-suspended nodes are properly updated.*/ for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (!(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID) && is_node_in_zone(i,j,k,zone))) suspend_node(&(np[_ai(gl,i,j,k)])); } } #ifdef UNSTEADY void increase_time_level(np_t *np, gl_t *gl){ long i,j,k,flux,l; gl->time+=gl->dt; gl->iter=0; add_double_to_codex(&(gl->cycle.codex),"time",gl->time); for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ l=_ai(gl,i,j,k); if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID))){ for (flux=0; flux<nf; flux++){ #if _RESTIME_BW > 3 np[l].bs->Um3[flux]=np[l].bs->Um2[flux]; #endif #if _RESTIME_BW > 2 np[l].bs->Um2[flux]=np[l].bs->Um1[flux]; #endif np[l].bs->Um1[flux]=np[l].bs->U[flux]; #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_RESIDUAL np[l].bs->trapezoidalm1[flux]=np[l].bs->trapezoidalm1_next[flux]; #endif } #ifdef _RESTIME_STORAGE_TRAPEZOIDAL_MUSCLVARS find_musclvars(np[l],gl,np[l].bs->trapezoidalm1); #endif } #ifdef EMFIELD if ((is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD))){ for (flux=0; flux<nfe; flux++){ np[l].bs->Uemfieldm1[flux]=np[l].bs->Uemfield[flux]; } } #endif } } #endif//UNSTEADY void runtime_actions(char *actionname, char **argum, SOAP_codex_t *codex){ char *oldfilename; oldfilename=(char *)malloc(sizeof(char)*(5+strlen((((readcontrolarg_t *)codex->action_args)->gl->output_filename)))); strcpy(oldfilename,(((readcontrolarg_t *)codex->action_args)->gl->output_filename)); if (strcmp(actionname,"WriteDataFile")==0) { if (SOAP_number_argums(*argum)==1){ SOAP_substitute_all_argums(argum, codex); SOAP_get_argum_string(codex,&(((readcontrolarg_t *)codex->action_args)->gl->output_filename),*argum,0); } if (SOAP_number_argums(*argum)>1){ SOAP_fatal_error(codex,"Action WriteDataFile() can not be called with more than 1 argument. Either it is called with one argument (a string containing the data file name) or with no argument. 
If no argument is given, the default data file name as specified on the command line will be used."); } write_data_file(*((readcontrolarg_t *)codex->action_args)->np, ((readcontrolarg_t *)codex->action_args)->gl); codex->ACTIONPROCESSED=TRUE; } strcpy((((readcontrolarg_t *)codex->action_args)->gl->output_filename),oldfilename); free(oldfilename); if (strcmp(actionname,"Init")==0) { read_init(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE; #ifdef EMFIELD ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE; #endif codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Model")==0) { read_model(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE; #ifdef EMFIELD ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE; #endif codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Disc")==0) { read_disc(*argum, codex); codex->action=&runtime_actions; ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED=TRUE; #ifdef EMFIELD ((readcontrolarg_t *)codex->action_args)->gl->RESIDUAL_ALTERED_EMFIELD=TRUE; #endif codex->ACTIONPROCESSED=TRUE; } if (strcmp(actionname,"Cycle")==0) { read_cycle(*argum, codex); codex->action=&runtime_actions; codex->ACTIONPROCESSED=TRUE; } runtime_actions_cycle_specific(actionname,argum,codex); } void write_cycle_template(FILE **controlfile){ wfprintf(*controlfile, "\n\n" "Cycle(\n" ); write_cycle_fluid_template(controlfile); #ifdef EMFIELD write_cycle_emfield_template(controlfile); #endif write_runtime_template(controlfile); wfprintf(*controlfile, ");\n" ); } void read_cycle_actions(char *actionname, char **argum, SOAP_codex_t *codex){ gl_t *gl; gl=((readcontrolarg_t *)codex->action_args)->gl; if (strcmp(actionname,_CYCLE_ACTIONNAME)==0 && !gl->CONTROL_READ) { if (((readcontrolarg_t *)codex->action_args)->VERBOSE) wfprintf(stdout,"%s..",_CYCLE_ACTIONNAME); 
((readcontrolarg_t *)codex->action_args)-> gl->cycle.code_runtime=(char *)malloc((strlen(*argum)+2)*sizeof(char)); strcpy(((readcontrolarg_t *)codex->action_args)->gl->cycle.code_runtime,*argum); ((readcontrolarg_t *)codex->action_args)->gl->cycle.RUNTIMEMODULEFOUND=TRUE; codex->ACTIONPROCESSED=TRUE; } read_cycle_fluid_actions(actionname, argum, codex); read_cycle_emfield_actions(actionname, argum, codex); } void read_cycle(char *argum, SOAP_codex_t *codexcontrol){ gl_t *gl; gl=((readcontrolarg_t *)codexcontrol->action_args)->gl; if (!gl->CONTROL_READ){ gl->cycle.RUNTIMEMODULEFOUND=FALSE; } codexcontrol->action=&read_cycle_actions; SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL); if (!gl->CONTROL_READ){ if (!gl->CYCLE_FLUID_READ) fatal_error("The fluid module %s() was not found within Cycle().",_FLUID_ACTIONNAME); if (!gl->CYCLE_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Cycle().",_EMFIELD_ACTIONNAME); if (!gl->cycle.RUNTIMEMODULEFOUND) fatal_error("The module %s() was not found within Cycle().",_CYCLE_ACTIONNAME); init_cycle(argum,codexcontrol); } } void write_disc_template(FILE **controlfile){ wfprintf(*controlfile, "\n\n" "Disc(\n" ); write_disc_fluid_template(controlfile); #ifdef EMFIELD write_disc_emfield_template(controlfile); #endif write_disc_resconv_template(controlfile); write_disc_restime_template(controlfile); wfprintf(*controlfile, ");\n" ); } void read_disc_actions(char *actionname, char **argum, SOAP_codex_t *codex){ // gl_t *gl; // gl=((readcontrolarg_t *)codex->action_args)->gl; read_disc_fluid_actions(actionname, argum, codex); read_disc_emfield_actions(actionname, argum, codex); read_disc_resconv_actions(actionname, argum, codex); read_disc_restime_actions(actionname, argum, codex); } void read_disc(char *argum, SOAP_codex_t *codexcontrol){ gl_t *gl; gl=((readcontrolarg_t *)codexcontrol->action_args)->gl; codexcontrol->action=&read_disc_actions; gl->DISC_FLUID_READ=FALSE; gl->DISC_EMFIELD_READ=FALSE; 
gl->DISC_RESCONV_READ=FALSE; gl->DISC_RESTIME_READ=FALSE; SOAP_process_code(argum, codexcontrol, SOAP_VARS_KEEP_ALL); if (!gl->CONTROL_READ){ if (!gl->DISC_FLUID_READ) fatal_error("The fluid module %s() was not found within Disc().",_FLUID_ACTIONNAME); if (!gl->DISC_EMFIELD_READ) fatal_error("The emfield module %s() was not found within Disc().",_EMFIELD_ACTIONNAME); if (!gl->DISC_RESCONV_READ) fatal_error("The residual convection module %s() was not found within Disc().",_RESCONV_ACTIONNAME); if (!gl->DISC_RESTIME_READ) fatal_error("The residual time module %s() was not found within Disc().",_RESTIME_ACTIONNAME); } } #ifdef DISTMPI /* not used anymore */ void MPI_Allreduce_Sum_Cliplist(char **cliplist_str){ int rank,numproc,proc,thiscliplist_len; char *cliplistmem_str,*thiscliplist_str; cliplistmem_str=(char *)malloc((strlen(*cliplist_str)+10)*sizeof(char)); strcpy(cliplistmem_str,*cliplist_str); thiscliplist_str=(char *)malloc(sizeof(char)); strcpy(*cliplist_str,""); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (proc=0; proc<numproc; proc++){ if (proc==rank) { thiscliplist_len=strlen(cliplistmem_str); thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1)); strcpy(thiscliplist_str,cliplistmem_str); } MPI_Bcast(&thiscliplist_len,1,MPI_INT,proc,MPI_COMM_WORLD); thiscliplist_str=(char *)realloc(thiscliplist_str,sizeof(char)*(thiscliplist_len+1)); MPI_Bcast(thiscliplist_str,thiscliplist_len+1,MPI_CHAR,proc,MPI_COMM_WORLD); *cliplist_str=(char *)realloc(*cliplist_str,sizeof(char)*(strlen(*cliplist_str)+thiscliplist_len+1)); strcat(*cliplist_str,thiscliplist_str); } free(cliplistmem_str); free(thiscliplist_str); } void find_clipped_variables_all(gl_t *gl){ int rank,numproc,proc,cnt; int thisclipnamenum,thisclipname_len; char *thisclipname; long thisclipnum; reset_clipped_variables_all(gl); thisclipname=(char *)malloc(sizeof(char)); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, 
&numproc); for (proc=0; proc<numproc; proc++){ if (proc==rank) { thisclipnamenum=gl->model.clipnamenum; } MPI_Bcast(&thisclipnamenum,1,MPI_INT,proc,MPI_COMM_WORLD); for (cnt=0; cnt<thisclipnamenum; cnt++){ if (proc==rank) { thisclipname_len=strlen(gl->model.clipname[cnt]); } MPI_Bcast(&thisclipname_len,1,MPI_INT,proc,MPI_COMM_WORLD); thisclipname=(char *)realloc(thisclipname,sizeof(char)*(thisclipname_len+1)); if (proc==rank) { strcpy(thisclipname,gl->model.clipname[cnt]); thisclipnum=gl->model.clipnum[cnt]; } MPI_Bcast(thisclipname,thisclipname_len+1,MPI_CHAR,proc,MPI_COMM_WORLD); MPI_Bcast(&thisclipnum,1,MPI_LONG,proc,MPI_COMM_WORLD); add_to_clipped_variables_all(gl, thisclipname, thisclipnum); // if (rank==0) printf("\n_%s(%ld)%d_",thisclipname,thisclipnum,proc); } } free(thisclipname); } #endif void update_runtime_codex_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){ char *cliplist_str; #ifdef DISTMPI int rank,proc; long ijk_ximax; struct { double ximax; int rank; } ximaxrank,ximaxrank_max; #ifdef EMFIELD long ijk_ximax_emfield; struct { double ximax; int rank; } ximaxrank_emfield,ximaxrank_max_emfield; #endif #endif//DISTMPI #ifdef DISTMPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); if (rank!=0) codex->SCREENOUTPUT=FALSE; #endif cliplist_str=(char *)malloc(sizeof(char)); #ifdef DISTMPI find_clipped_variables_all(gl); find_clipped_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo",cliplist_str); find_clipped_muscl_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_muscl",cliplist_str); find_clipped_bdry_variables_list_all(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_bdry",cliplist_str); #else find_clipped_variables_list(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo",cliplist_str); find_clipped_muscl_variables_list(gl,&cliplist_str); add_string_to_codex(codex,"clipinfo_muscl",cliplist_str); find_clipped_bdry_variables_list(gl,&cliplist_str); 
add_string_to_codex(codex,"clipinfo_bdry",cliplist_str); //MPI_Allreduce_Sum_Cliplist(&cliplist_str); #endif free(cliplist_str); #ifdef DISTMPI ximaxrank.ximax=gl->ximax; ximaxrank.rank=rank; MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); add_double_to_codex(codex,"ximax",ximaxrank_max.ximax); ijk_ximax=gl->i_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"i_ximax",ijk_ximax); #ifdef EMFIELD ximaxrank_emfield.ximax=gl->ximax_emfield; ximaxrank_emfield.rank=rank; MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); add_double_to_codex(codex,"ximax_emfield",ximaxrank_max_emfield.ximax); ijk_ximax_emfield=gl->i_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"i_ximax_emfield",ijk_ximax_emfield); #endif #ifdef _2DL ijk_ximax=gl->j_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"j_ximax",ijk_ximax); #ifdef EMFIELD ijk_ximax_emfield=gl->j_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"j_ximax_emfield",ijk_ximax_emfield); #endif #endif//_2DL #ifdef _3DL ijk_ximax=gl->k_ximax; MPI_Bcast(&ijk_ximax,1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"k_ximax",ijk_ximax); #ifdef EMFIELD ijk_ximax_emfield=gl->k_ximax_emfield; MPI_Bcast(&ijk_ximax_emfield,1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); add_int_to_codex(codex,"k_ximax_emfield",ijk_ximax_emfield); #endif #endif//_3DL #else//DISTMPI add_double_to_codex(codex,"ximax",gl->ximax); add_int_to_codex(codex,"i_ximax",gl->i_ximax); #ifdef EMFIELD add_double_to_codex(codex,"ximax_emfield",gl->ximax_emfield); add_int_to_codex(codex,"i_ximax_emfield",gl->i_ximax_emfield); #endif #ifdef _2DL add_int_to_codex(codex,"j_ximax",gl->j_ximax); #ifdef EMFIELD 
add_int_to_codex(codex,"j_ximax_emfield",gl->j_ximax_emfield); #endif #endif//_2DL #ifdef _3DL add_int_to_codex(codex,"k_ximax",gl->k_ximax); #ifdef EMFIELD add_int_to_codex(codex,"k_ximax_emfield",gl->k_ximax_emfield); #endif #endif//_3DL #endif//DISTMPI } void update_runtime_codex_vars_except_xi_from_gl(gl_t *gl, SOAP_codex_t *codex){ #ifdef DISTMPI double effiter_U_sum,effiter_R_sum; int rank,proc; #ifdef EMFIELD double effiter_U_sum_emfield,effiter_R_sum_emfield; #endif #endif//DISTMPI #ifdef DISTMPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); if (rank!=0) codex->SCREENOUTPUT=FALSE; #endif add_int_to_codex(codex,"iter", gl->iter); add_double_to_codex(codex,"xiverge",gl->cycle.fluid.xiverge); add_string_to_codex(codex,"outputfilename", gl->output_filename); #ifdef EMFIELD add_double_to_codex(codex,"xiverge_emfield",gl->cycle.emfield.xiverge); #endif #if defined(UNSTEADY) add_double_to_codex(codex,"time",gl->time); #endif add_double_to_codex(codex,"CFL",gl->CFL); #ifdef UNSTEADY add_double_to_codex(codex,"dt",gl->dt); #endif #ifdef _CYCLE_MULTIZONE_MARCHING add_double_to_codex(codex,"window.is",gl->window.is); add_double_to_codex(codex,"window.ie",gl->window.ie); add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0); add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0); #endif #ifdef _CYCLE_MULTIZONE add_int_to_codex(&(gl->cycle.codex), "numzones_updated", 0); add_int_to_codex(&(gl->cycle.codex), "numzones_total", 0); #endif #ifdef DISTMPI MPI_Allreduce(&gl->effiter_U, &effiter_U_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_U",effiter_U_sum); MPI_Allreduce(&gl->effiter_R, &effiter_R_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_R",effiter_R_sum); #ifdef EMFIELD MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_U_emfield",effiter_U_sum_emfield); 
MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_sum_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); add_double_to_codex(codex,"effiter_R_emfield",effiter_R_sum_emfield); #endif #else//DISTMPI add_double_to_codex(codex,"effiter_U",gl->effiter_U); add_double_to_codex(codex,"effiter_R",gl->effiter_R); #ifdef EMFIELD add_double_to_codex(codex,"Lc",gl->Lc); // add_double_to_codex(codex,"relaxEMF",gl->relaxEMF); add_double_to_codex(codex,"effiter_U_emfield",gl->effiter_U_emfield); add_double_to_codex(codex,"effiter_R_emfield",gl->effiter_R_emfield); #endif #endif//DISTMPI } void add_constants_to_codex(gl_t *gl, SOAP_codex_t *codex){ char str[100]; sprintf(str, "%d", TSEMF_DEFAULT); SOAP_add_to_vars(codex,"TSEMF_DEFAULT",str); sprintf(str, "%d", TSEMF_ADI); SOAP_add_to_vars(codex,"TSEMF_ADI",str); sprintf(str, "%d", TSEMF_DDADI); SOAP_add_to_vars(codex,"TSEMF_DDADI",str); sprintf(str, "%d", TSEMF_IMAF); SOAP_add_to_vars(codex,"TSEMF_IMAF",str); sprintf(str, "%d", TSEMF_ADIIMAF); SOAP_add_to_vars(codex,"TSEMF_ADIIMAF",str); sprintf(str, "%d", TSEMF_NEWTON); SOAP_add_to_vars(codex,"TSEMF_NEWTON",str); sprintf(str, "%d", TSEMF_ADIi); SOAP_add_to_vars(codex,"TSEMF_ADIi",str); sprintf(str, "%d", TSEMF_ADIk); SOAP_add_to_vars(codex,"TSEMF_ADIk",str); sprintf(str, "%d", TSEMF_IMAFk); SOAP_add_to_vars(codex,"TSEMF_IMAFk",str); sprintf(str, "%d", TSEMF_IMAFi); SOAP_add_to_vars(codex,"TSEMF_IMAFi",str); sprintf(str, "%d", TSEMF_SOR); SOAP_add_to_vars(codex,"TSEMF_SOR",str); sprintf(str, "%d", TSEMF_SOR2); SOAP_add_to_vars(codex,"TSEMF_SOR2",str); sprintf(str, "%d", PRECON_CONSTANTTIMESTEP); SOAP_add_to_vars(codex,"PRECON_CONSTANTTIMESTEP",str); sprintf(str, "%d", PRECON_LOCALTIMESTEP); SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP",str); sprintf(str, "%d", PRECON_LOCALTIMESTEP2); SOAP_add_to_vars(codex,"PRECON_LOCALTIMESTEP2",str); sprintf(str, "%d", PRECON_LOCALEIGENVALUE); SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE",str); sprintf(str, "%d", PRECON_LOCALEIGENVALUE2); 
SOAP_add_to_vars(codex,"PRECON_LOCALEIGENVALUE2",str); } void process_code_runtime(np_t *np, gl_t *gl, char *code_runtime, SOAP_codex_t *codex){ char *code; SOAP_vars_t *varsmem; readcontrolarg_t Runtimearg; varsmem=(SOAP_vars_t *)malloc(sizeof(SOAP_vars_t)); SOAP_copy_all_vars(codex->vars, &varsmem); Runtimearg.np=&np; Runtimearg.gl=gl; Runtimearg.input=(input_t *)malloc(sizeof(input_t)); Runtimearg.input->READDATAFILE=FALSE; Runtimearg.TYPELEVEL=TYPELEVEL_FLUID; Runtimearg.module_level=0; Runtimearg.POSTMODULE=FALSE; Runtimearg.CYCLEMODULE=FALSE; Runtimearg.RESETITERCOUNT=FALSE; Runtimearg.VERBOSE=FALSE; Runtimearg.gl_post=*gl; Runtimearg.domain_post=gl->domain; Runtimearg.np_post=np; if (!gl->cycle.RUNTIMEMODULEFOUND) fatal_error("The %s() module was not found within Cycle().",_CYCLE_ACTIONNAME); code=(char *)malloc((strlen(code_runtime)+2)*sizeof(char)); strcpy(code,code_runtime); codex->ACTION=TRUE; codex->action=&runtime_actions; codex->action_args=(void *)&Runtimearg; ((readcontrolarg_t *)codex->action_args)->np=&np; ((readcontrolarg_t *)codex->action_args)->gl=gl; /* if (codex->action_being_processed==NULL){ codex->action_being_processed=(char *)malloc((strlen(_CYCLE_ACTIONNAME)+2)*sizeof(char)); strcpy(codex->action_being_processed,_CYCLE_ACTIONNAME); }*/ codex->VERBOSE=FALSE; codex->SCREENOUTPUT=TRUE; add_constants_to_codex(gl, codex); update_runtime_codex_xi_from_gl(gl, codex); update_runtime_codex_vars_except_xi_from_gl(gl,codex); SOAP_process_code(code, codex, SOAP_VARS_KEEP_ALL); gl->CFL=SOAP_var_value(codex,"CFL"); #ifdef UNSTEADY gl->dt=SOAP_var_value(codex,"dt"); #endif gl->ximax=SOAP_var_value(codex,"ximax"); assert(gl->CFL>=0.0e0); /* here, make sure that all changes to vars within runtime module are erased, because those will not be written to datafile -> CFL and ximax and dt are exception to this, and this is why they are probed through SOAP_var_value above */ if (gl->RESETRUNTIMEVARS){ SOAP_free_all_vars(codex->vars); 
SOAP_copy_all_vars(varsmem,&(codex->vars)); } free(Runtimearg.input); SOAP_free_all_vars(varsmem); free(varsmem); free(code); reset_clipped_variables(gl); }
/* Scan the inner fluid nodes of zone and store the largest convergence residual
   wk->xi found in gl->ximax; when IJK_UPDATE==IJK_UPDATE_YES also record the (i,j,k)
   location of the maximum in gl->i_ximax / gl->j_ximax / gl->k_ximax.  Aborts via
   fatal_error() if a node's xi is NaN or a huge negative value (< -1e99). */
void find_ximax(np_t *np, gl_t *gl, zone_t zone, int IJK_UPDATE){ long i,j,k; double xi; gl->ximax=0.0e0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) { assert(is_node_resumed(np[_ai(gl,i,j,k)])); xi=np[_ai(gl,i,j,k)].wk->xi; if (xi<-1.0e99 || isnan(xi)) { fatal_error("problem with xi (xi=%E) at i=%ld, j=%ld, k=%ld.",xi,i,j,k); } if (xi>=gl->ximax) { gl->ximax=xi; if (IJK_UPDATE==IJK_UPDATE_YES) { gl->i_ximax=i; gl->j_ximax=j; gl->k_ximax=k; } } } } } /* static void PrintZones(zone_t *zones, long numzone){ long cnt; for (cnt=0; cnt<numzone; cnt++){ printf("%ld is=%ld js=%ld ie=%ld je=%ld\n",cnt,zones[cnt].is,zones[cnt].js, zones[cnt].ie,zones[cnt].je); } printf("\n"); } */
/* Make the zones in the list mutually non-overlapping: for every ordered pair of
   zones that overlap along one index direction while matching exactly along the
   other direction(s), split the overlap at its midpoint (shrink zones[cnt1]'s upper
   bound, grow zones[cnt2]'s lower bound) and abort if the split would invert either
   zone. */
static void rearrange_overlapping_zones(zone_t *zones, long numzone){ long cnt1,cnt2; /* PrintZone(zones,numzones); */ for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along i : make ie of zones[cnt1] smaller and is of zones[cnt2] bigger */ if (if3DL( zones[cnt1].ks==zones[cnt2].ks && ) if2DL( zones[cnt1].js==zones[cnt2].js && ) if3DL( zones[cnt1].ke==zones[cnt2].ke && ) if2DL( zones[cnt1].je==zones[cnt2].je && ) zones[cnt1].ie< zones[cnt2].ie && zones[cnt1].ie>=zones[cnt2].is) { zones[cnt1].ie=(zones[cnt1].ie+zones[cnt2].is)/2; zones[cnt2].is=zones[cnt1].ie+1; if ( zones[cnt1].is>zones[cnt1].ie || zones[cnt2].is>zones[cnt2].ie ) fatal_error("Problem modifying zones along i."); } } } } #ifdef _2DL for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along j : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/ if (if3DL( zones[cnt1].ks==zones[cnt2].ks && ) zones[cnt1].is==zones[cnt2].is && if3DL( zones[cnt1].ke==zones[cnt2].ke && ) zones[cnt1].ie==zones[cnt2].ie && zones[cnt1].je<
zones[cnt2].je && zones[cnt1].je>=zones[cnt2].js) { zones[cnt1].je=(zones[cnt1].je+zones[cnt2].js)/2; zones[cnt2].js=zones[cnt1].je+1; if ( zones[cnt1].js>zones[cnt1].je || zones[cnt2].js>zones[cnt2].je ) fatal_error("Problem modifying zones along j."); } } } } #endif #ifdef _3DL for (cnt1=0; cnt1<numzone; cnt1++){ for (cnt2=0; cnt2<numzone; cnt2++){ if (cnt2!=cnt1){ /* do overlap along k : make je of zones[cnt1] smaller and js of zones[cnt2] bigger*/ if (zones[cnt1].is==zones[cnt2].is && zones[cnt1].js==zones[cnt2].js && zones[cnt1].ie==zones[cnt2].ie && zones[cnt1].je==zones[cnt2].je && zones[cnt1].ke< zones[cnt2].ke && zones[cnt1].ke>=zones[cnt2].ks) { zones[cnt1].ke=(zones[cnt1].ke+zones[cnt2].ks)/2; zones[cnt2].ks=zones[cnt1].ke+1; if ( zones[cnt1].ks>zones[cnt1].ke || zones[cnt2].ks>zones[cnt2].ke ) fatal_error("Problem modifying zones along k."); } } } } #endif /* PrintZone(zone,numzone); */ } /* setup multizone situated inside zone */ void setup_multizone(np_t *np, gl_t *gl, zone_t zone, zone_t lim, double xiverge, long zonelength, bool UPDATE_ALL_ZONES, multizone_t *multizone){ long cnt; long numsubzones; zone_t *subzones; double ximax; long i,j,k; /* find the zones for the ts process */ subzones=(zone_t *)malloc(sizeof(zone_t)); find_subzones_in_zone_given_zonelength(zonelength, zone, &numsubzones, &subzones); /* find out which zones need to be updated */ multizone->numzones_ts=0; multizone->ts=(zone_t *)malloc(numsubzones*sizeof(zone_t)); for (cnt=0; cnt<numsubzones; cnt++){ ximax=0.0e0; for_ijk(subzones[cnt],is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID_WORK)) { ximax=max(ximax,np[_ai(gl,i,j,k)].wk->xi); } } if (ximax>xiverge || UPDATE_ALL_ZONES) { multizone->ts[multizone->numzones_ts]=subzones[cnt]; (multizone->numzones_ts)++; } } /* setup res and bdry, limited by lim_is,lim_js, etc*/ multizone->bdry=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t)); multizone->res=(zone_t *)malloc(multizone->numzones_ts*sizeof(zone_t)); 
for (cnt=0; cnt<multizone->numzones_ts; cnt++){ multizone->bdry[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid); multizone->bdry[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid); #ifdef _2DL multizone->bdry[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid); multizone->bdry[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid); #endif #ifdef _3DL multizone->bdry[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid); multizone->bdry[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid); #endif multizone->res[cnt].is=max(lim.is,multizone->ts[cnt].is-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].ie=min(lim.ie,multizone->ts[cnt].ie+hbw_bdry_fluid+hbw_res_fluid); #ifdef _2DL multizone->res[cnt].js=max(lim.js,multizone->ts[cnt].js-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].je=min(lim.je,multizone->ts[cnt].je+hbw_bdry_fluid+hbw_res_fluid); #endif #ifdef _3DL multizone->res[cnt].ks=max(lim.ks,multizone->ts[cnt].ks-hbw_bdry_fluid-hbw_res_fluid); multizone->res[cnt].ke=min(lim.ke,multizone->ts[cnt].ke+hbw_bdry_fluid+hbw_res_fluid); #endif } multizone->numzones_total=numsubzones; multizone->numzones_res=multizone->numzones_ts; multizone->numzones_bdry=multizone->numzones_ts; free(subzones); rearrange_overlapping_zones(multizone->res,multizone->numzones_res); } void *thread_zone(void *threadzone){ np_t * np = ((threadzone_t *) threadzone)->np; gl_t * gl = ((threadzone_t *) threadzone)->gl; zone_t zone = ((threadzone_t *) threadzone)->zone; ((threadzone_t *) threadzone)->funct(np,gl,zone); return(NULL); } void create_thread_zone(np_t *np, gl_t * gl, zone_t zone, void (*funct)(np_t *, gl_t *, zone_t zone), pthread_t *pthread, threadzone_t *threadzone){ threadzone->np=np; threadzone->gl=gl; threadzone->zone=zone; threadzone->funct=funct; #ifdef ZONETHREADS if (pthread_create(pthread, NULL, thread_zone, threadzone)) fatal_error("Cannot create thread."); #else (*thread_zone)(threadzone); #endif } void join_all_threads_zone(long 
numthread, pthread_t *pthread, bool COUNTFLAG){ #ifdef ZONETHREADS long thread; void *retval; if (!COUNTFLAG) { for (thread=0; thread<numthread; thread++){ if (pthread_join(pthread[thread],&retval)) fatal_error("Cannot join thread %ld.",thread); } } #endif }
/* 1D-segment kernel: walk the nodes from ls to le along direction theta and add the
   stored increment wk->dUstar to each node's conservative variables U, taking the
   per-node zone-thread lock around the update.  The effective-iteration counter
   gl->effiter_U is incremented by 1/nn per node under the global lock (see the
   original comment below for why both loop- and zone-thread locking is used). */
static void update_U_from_dUstar_1(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); add_dUstar_to_U(np,l,gl,np[l].wk->dUstar); thread_lock_node_unset(np,l,THREADTYPE_ZONE); /* - if not using SMALLTHREADS, only need to lock for the loop threads, since gl is local for the zone thread - if using SMALLTHREADS, then need to lock for both the loop and zone threads For now, lock for both the loop and zone threads */ thread_lock_global_set(gl,THREADTYPE_ALL); gl->effiter_U+=1.0/(double)(gl->nn); thread_lock_global_unset(gl,THREADTYPE_ALL); } }
/* Apply update_U_from_dUstar_1 to every inner fluid-work node of zone via 1D sweeps
   along the i direction. */
static void update_U_from_dUstar(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np,gl,zone,&update_U_from_dUstar_1,SWEEPTYPE_I,TYPELEVEL_FLUID_WORK, &is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE); }
/* Choose how many zone threads to use for numzone zones, capped at maxzonethread.
   When capping is needed, it picks the thread count whose remainder batch
   (mod(numzone,cnt), treated as cnt when it divides evenly) is largest — i.e. the
   count that keeps the final batch of zones as full as possible, presumably to
   balance the per-batch workload (heuristic; behavior follows directly from the
   loop below). */
long _numthread_optimized(long numzone){ long l,cnt,lmax,numthread; numthread=numzone; if (numzone>maxzonethread) { lmax=0; for (cnt=1; cnt<=maxzonethread; cnt++){ l=mod(numzone,cnt); if (l==0) l=cnt; if (l>lmax) { numthread=cnt; lmax=l; } } } return(numthread); } #ifdef DISTMPI
/* Exchange the conservative variables U between MPI ranks: for each pair of ranks
   whose owned domain intersects the other's extended (lim) domain, the owning rank
   packs the U vectors of the shared nodes and buffered-sends them; the receiving
   rank writes them into its halo nodes and rebuilds primitive variables on the
   resumed ones.  NOTE(review): sendUlocal grows by one flux_t per node with an
   element-shifting realloc, which is O(n^2) in the number of exchanged nodes —
   candidate for pre-counting the totals, but left as-is here. */
void exchange_U(np_t *np, gl_t *gl){ int bl,rankrecv,numproc,ranksend,thisrank,pack_size_Ulocal,pack_size_cnt; long i,j,k,flux,iterator,cnt,prevcnt,total; long primcnt=0; long bufsize=0; long *recvcnt,*sendcnt,*processcnt; long *primnodenums=NULL; long *processnodenums; double *buf,*bufptr; zone_t zonesend,zonerecv,zone; flux_t *recvUlocal; flux_t *sendUlocal=NULL; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); recvcnt=(long *)malloc(numproc*sizeof(long)); sendcnt=(long *)malloc(numproc*sizeof(long)); processcnt=(long
*)malloc(numproc*sizeof(long)); processnodenums=(long *)malloc(numproc*sizeof(long)); for (i=0; i<numproc; i++){ sendcnt[i]=0; processcnt[i]=0; } for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (total=0,iterator=0; iterator<numproc; iterator++) total+=sendcnt[iterator]; for (prevcnt=0,iterator=0; iterator<=rankrecv; iterator++) prevcnt+=sendcnt[iterator]; sendUlocal=(flux_t *)realloc(sendUlocal,(total+1)*sizeof(flux_t)); for (iterator=prevcnt+1;iterator<total+1;iterator++){ for (flux=0; flux<nf; flux++) *(*(sendUlocal + iterator) + flux)=*(*(sendUlocal + iterator-1) + flux); } for (flux=0; flux<nf; flux++) *(*(sendUlocal + prevcnt) + flux)=np[_ai(gl,i,j,k)].bs->U[flux]; sendcnt[rankrecv]++; } if (rankrecv==thisrank){ for (prevcnt=0,iterator=0; iterator<=ranksend; iterator++) prevcnt+=processcnt[iterator]; processnodenums=(long *)realloc(processnodenums,(prevcnt+1)*sizeof(long)); processnodenums[prevcnt]=_ai(gl,i,j,k); processcnt[ranksend]++; if (is_node_resumed(np[_ai(gl,i,j,k)])){ primnodenums=(long *)realloc(primnodenums,(primcnt+1)*sizeof(long)); primnodenums[primcnt]=_ai(gl,i,j,k); primcnt++; } } } } } } } if(numproc != 1){ for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (thisrank!=rankrecv){ MPI_Pack_size(nf*sendcnt[rankrecv],MPI_DOUBLE,MPI_COMM_WORLD,&pack_size_Ulocal); MPI_Pack_size(1,MPI_LONG,MPI_COMM_WORLD,&pack_size_cnt); bufsize+=(2*MPI_BSEND_OVERHEAD)+pack_size_Ulocal+pack_size_cnt; } } buf=(double *)malloc(bufsize); MPI_Buffer_attach(buf, bufsize); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (thisrank!=rankrecv){ for (prevcnt=0,iterator=0; iterator<rankrecv; iterator++) 
prevcnt+=sendcnt[iterator]; MPI_Bsend(&sendcnt[rankrecv],1,MPI_LONG,rankrecv,1,MPI_COMM_WORLD); MPI_Bsend(&sendUlocal[prevcnt],nf*sendcnt[rankrecv],MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } } free(sendUlocal); for (ranksend=0; ranksend<numproc; ranksend++){ if (thisrank!=ranksend){ MPI_Recv(&recvcnt[ranksend],1,MPI_LONG,ranksend,1,MPI_COMM_WORLD,&MPI_Status1); recvUlocal=(flux_t *)malloc(recvcnt[ranksend]*sizeof(flux_t)); MPI_Recv(recvUlocal,recvcnt[ranksend]*nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (cnt=0; cnt<recvcnt[ranksend]; cnt++){ for (prevcnt=0,iterator=0; iterator<ranksend; iterator++) prevcnt+=processcnt[iterator]; for (flux=0; flux<nf; flux++) np[processnodenums[prevcnt+cnt]].bs->U[flux]=*(*(recvUlocal + cnt) + flux); } free(recvUlocal); } } #ifdef OPENMPTHREADS #pragma omp parallel for private(cnt) schedule(dynamic) #endif for (cnt=0; cnt<primcnt; cnt++) find_prim_fluid(np,primnodenums[cnt],gl); MPI_Buffer_detach(&bufptr,&bl); free(buf); } free(processnodenums); free(primnodenums); free(processcnt); free(recvcnt); free(sendcnt); MPI_Barrier(MPI_COMM_WORLD); } void exchange_U_old(np_t *np, gl_t *gl){ //same as above but without the MPI_Buffer int rankrecv,numproc,ranksend,thisrank; long i,j,k,flux; long cnt = 0; long *nodenums = NULL; zone_t zonesend,zonerecv,zone; flux_t Ulocal; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (flux=0; flux<nf; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->U[flux]; MPI_Send(Ulocal,nf,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } if 
(rankrecv==thisrank) { MPI_Recv(Ulocal,nf,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (flux=0; flux<nf; flux++) np[_ai(gl,i,j,k)].bs->U[flux]=Ulocal[flux]; if (is_node_resumed(np[_ai(gl,i,j,k)])){ nodenums=(long *)realloc(nodenums,(cnt+1)*sizeof(long)); nodenums[cnt]=_ai(gl,i,j,k); cnt++; } } } } } } } #ifdef OPENMPTHREADS #pragma omp parallel for private(i) schedule(dynamic) #endif for (i=0; i<cnt; i++) find_prim_fluid(np,nodenums[i],gl); free(nodenums); MPI_Barrier(MPI_COMM_WORLD); } #endif void update_U_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt,numzonethread,cntthread; pthread_t *pthread; threadzone_t *threadzone; /* Find dUstar for inner nodes*/ numzonethread=_numthread_optimized(multizone.numzones_ts); pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t)); threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t)); cntthread=0; for (cnt=0; cnt<multizone.numzones_ts; cnt++) { create_thread_zone(np, gl, multizone.ts[cnt], &find_dU, &(pthread[cntthread]), &(threadzone[cntthread])); cntthread++; if (cntthread==numzonethread) { join_all_threads_zone(cntthread, pthread, FALSE); cntthread=0; } } if (cntthread>0) join_all_threads_zone(cntthread, pthread, FALSE); for (cnt=0; cnt<multizone.numzones_ts; cnt++) update_U_from_dUstar(np, gl, multizone.ts[cnt]); free(pthread); free(threadzone); } void update_bdry_nodes_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt; for (cnt=0; cnt<multizone.numzones_bdry; cnt++) update_bdry_nodes(np, gl, multizone.bdry[cnt]); } void find_residual_with_multizone(np_t *np, gl_t *gl, multizone_t multizone){ long cnt,numzonethread,cntthread; pthread_t *pthread; threadzone_t *threadzone; numzonethread=_numthread_optimized(multizone.numzones_res); pthread=(pthread_t *)malloc(numzonethread*sizeof(pthread_t)); threadzone=(threadzone_t *)malloc(numzonethread*sizeof(threadzone_t)); cntthread=0; for (cnt=0; cnt<multizone.numzones_res; cnt++) { create_thread_zone(np, gl, 
multizone.res[cnt], &find_residual, &(pthread[cntthread]), &(threadzone[cntthread])); cntthread++; if (cntthread==numzonethread) { join_all_threads_zone(cntthread, pthread, FALSE); cntthread=0; } } if (cntthread>0) { join_all_threads_zone(cntthread, pthread, FALSE); } free(pthread); free(threadzone); } void solve_multizone(np_t *np, gl_t *gl, multizone_t multizone){ update_U_with_multizone(np,gl,multizone); update_bdry_nodes_with_multizone(np,gl,multizone); find_residual_with_multizone(np,gl,multizone); } void free_multizone(multizone_t *multizone){ free(multizone->res); free(multizone->bdry); free(multizone->ts); } void check_residual(np_t *np, gl_t *gl, zone_t zone){ resume_nodes_in_zone(np, gl, zone); #ifdef EMFIELD update_prim_emfield_mem_in_zone(np, gl, zone); #endif find_residual(np, gl, zone); find_ximax(np,gl,zone,IJK_UPDATE_YES); #ifdef EMFIELD find_residual_emfield(np,gl,zone); find_ximax_emfield(np, gl, zone); #endif #ifdef DISTMPI int rank,proc; struct { double ximax; int rank; } ximaxrank,ximaxrank_max; #ifdef EMFIELD struct { double ximax; int rank; } ximaxrank_emfield,ximaxrank_max_emfield; #endif MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &proc); ximaxrank.ximax=gl->ximax; ximaxrank.rank=rank; MPI_Allreduce(&ximaxrank, &ximaxrank_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); gl->ximax=ximaxrank_max.ximax; MPI_Bcast(&(gl->i_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); #ifdef EMFIELD ximaxrank_emfield.ximax=gl->ximax_emfield; ximaxrank_emfield.rank=rank; MPI_Allreduce(&ximaxrank_emfield, &ximaxrank_max_emfield, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD); gl->ximax_emfield=ximaxrank_max_emfield.ximax; MPI_Bcast(&(gl->i_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); #endif #ifdef _2DL MPI_Bcast(&(gl->j_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); #ifdef EMFIELD MPI_Bcast(&(gl->j_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); #endif #endif //_2DL #ifdef 
_3DL MPI_Bcast(&(gl->k_ximax),1,MPI_LONG,ximaxrank_max.rank,MPI_COMM_WORLD); #ifdef EMFIELD MPI_Bcast(&(gl->k_ximax_emfield),1,MPI_LONG,ximaxrank_max_emfield.rank,MPI_COMM_WORLD); #endif #endif //_3DL #endif //DISTMPI } double _xi(np_t np, gl_t *gl, flux_t Res){ long flux; double xi,xitmp; assert_np(np,is_node_resumed(np)); xi=0.0; for (flux=0; flux<nf; flux++) { xitmp=fabs(Res[flux]/_Omega(np,gl)/gl->cycle.fluid.Uref[flux]); xi=max(xi,xitmp); if (isnan(xitmp)){ fatal_error("problem computing xitmp in function _xi() in cycle_share.c;\n xitmp=%E\n Res[%ld]=%E\n Omega=%E\n Uref[%ld]=%E\n",xitmp,flux,Res[flux],_Omega(np,gl),flux,gl->cycle.fluid.Uref[flux]); } } return(xi); } static void find_Delta_Lambda_for_dtau_local(np_t *np, gl_t *gl, long l, long dim, flux_t Delta_Lambda){ long offset,maxoffset,flux,dim2; flux_t Delta_Lambda_tmp; find_Delta_Lambda_for_dtau(np, gl, l, dim, Delta_Lambda); if (gl->PRECONDITIONER==PRECON_LOCALTIMESTEP2){ maxoffset=1; for (dim2=dim; dim2<=dim; dim2++){ for (offset=1; offset<=maxoffset; offset++){ if (is_node_inner(np[_al(gl,l,dim2,-offset)],TYPELEVEL_FLUID_WORK)){ find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,-offset), dim, Delta_Lambda_tmp); for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]); } if (is_node_inner(np[_al(gl,l,dim2,+offset)],TYPELEVEL_FLUID_WORK)){ find_Delta_Lambda_for_dtau(np, gl, _al(gl,l,dim2,+offset), dim, Delta_Lambda_tmp); for (flux=0; flux<nf; flux++) Delta_Lambda[flux]=max(Delta_Lambda[flux],Delta_Lambda_tmp[flux]); } } } } } void find_dtau(np_t *np, gl_t *gl, long l, flux_t dtau){ double dtaumin,dtaumax; long dim,flux; double dtaulocal[nf][nd]; #ifdef UNSTEADY sqmat_t LambdaZ; #endif flux_t Delta_Lambda; assert_np(np[l],is_node_inner(np[l],TYPELEVEL_FLUID_WORK)); if (gl->PRECONDITIONER!=PRECON_CONSTANTTIMESTEP){ #ifdef UNSTEADY find_LambdaZ(np,gl,l,LambdaZ); set_matrix_to_identity(LambdaZ); //turn off effect of LambdaZ -> seems to be detrimental not beneficial 
for (dim=0; dim<nd; dim++){ find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda); for (flux=0; flux<nf; flux++){ assert(LambdaZ[flux][flux]>0.0); dtaulocal[flux][dim]=gl->dt/LambdaZ[flux][flux]/notzero(Delta_Lambda[flux]*gl->dt/LambdaZ[flux][flux]+1.0,1e-39); } } #else for (dim=0; dim<nd; dim++){ find_Delta_Lambda_for_dtau_local(np, gl, l, dim, Delta_Lambda); for (flux=0; flux<nf; flux++){ dtaulocal[flux][dim]=1.0/notzero(Delta_Lambda[flux],1e-39); } } #endif /* find optimal dtaus for each flux */ for (flux=0; flux<nf; flux++){ dtaumin=1.0e99; dtaumax=0.0e0; for (dim=0; dim<nd; dim++){ dtaumin=min(dtaulocal[flux][dim],dtaumin); dtaumax=max(dtaulocal[flux][dim],dtaumax); } dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax); dtau[flux]=gl->CFL*pow(dtaumin,1.0e0-gl->sigma1)*pow(dtaumax,gl->sigma1); } } else { for (flux=0; flux<nf; flux++){ dtau[flux]=gl->dtau; } } } void find_constant_dtau(np_t *np, gl_t *gl, long l, double *dtau){ long flux; flux_t dtau_vector; double dtaumin,dtaumax; find_dtau(np,gl,l,dtau_vector); /* average min and max dtau */ dtaumin=1.0e99; dtaumax=-1.0e99; for (flux=0; flux<nf; flux++) dtaumin=min(dtaumin,dtau_vector[flux]); for (flux=0; flux<nf; flux++) dtaumax=max(dtaumax,dtau_vector[flux]); dtaumax=min(dtaumin*MAXRATIO_DTAUMAX_DTAUMIN,dtaumax); *dtau=pow(dtaumin,1.0-gl->sigma2)*pow(dtaumax,gl->sigma2); } #ifdef EMFIELD #ifdef DISTMPI void exchange_U_emfield(np_t *np, gl_t *gl){ int rankrecv,numproc,ranksend,thisrank; long i,j,k,flux; zone_t zonesend,zonerecv,zone; fluxemfield_t Ulocal; MPI_Status MPI_Status1; MPI_Comm_rank(MPI_COMM_WORLD, &thisrank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); for (ranksend=0; ranksend<numproc; ranksend++){ zonesend=_domain_from_rank(ranksend,gl); for (rankrecv=0; rankrecv<numproc; rankrecv++){ if (rankrecv!=ranksend && (ranksend==thisrank || rankrecv==thisrank)){ zonerecv=_domain_lim_from_rank(rankrecv,gl); if (is_zone_intersecting_zone(zonesend,zonerecv)){ 
zone=_zone_intersection(zonesend,zonerecv); for_ijk(zone,is,js,ks,ie,je,ke){ if (ranksend==thisrank) { for (flux=0; flux<nfe; flux++) Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux]; MPI_Send(Ulocal,nfe,MPI_DOUBLE,rankrecv,0,MPI_COMM_WORLD); } if (rankrecv==thisrank) { MPI_Recv(Ulocal,nfe,MPI_DOUBLE,ranksend,0,MPI_COMM_WORLD,&MPI_Status1); for (flux=0; flux<nfe; flux++) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux]; } } } } } } MPI_Barrier(MPI_COMM_WORLD); } void exchange_U_emfield_old(np_t *np, gl_t *gl){ int rank; long i,j,k,flux; fluxemfield_t Ulocal; MPI_Comm_rank(MPI_COMM_WORLD, &rank); for_ijk (gl->domain_all,is,js,ks,ie,je,ke){ if (rank==_node_rank(gl, i, j, k) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (flux=0; flux<nfe; flux++) { Ulocal[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux]; } } MPI_Bcast_Node(Ulocal,nfe,MPI_DOUBLE,_node_rank(gl,i,j,k),MPI_COMM_WORLD,i,j,k,gl); if (is_node_in_zone(i,j,k,gl->domain_lim) && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (flux=0; flux<nfe; flux++) { np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Ulocal[flux]; } } } MPI_Barrier(MPI_COMM_WORLD); } #endif void update_prim_emfield_mem_in_zone_1(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; //printf("(%ld,%ld) to (%ld,%ld)\n",_i(ls,gl,0),_i(ls,gl,1),_i(le,gl,0),_i(le,gl,1)); for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_1(np, gl, l); } } } void update_prim_emfield_mem_in_zone_2(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_2(np, gl, l); } } } void update_prim_emfield_mem_in_zone_3(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_valid(np[l],TYPELEVEL_EMFIELD)){ find_prim_emfield_mem_3(np, gl, l); } } } #ifdef 
_TSEMF_STORE_COEFFICIENTS void update_prim_emfield_mem_in_zone_4(np_t *np, gl_t *gl, long theta, long ls, long le){ long l,dim,flux; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_inner(np[l],TYPELEVEL_EMFIELD)){ for (flux=0; flux<nfe; flux++){ find_dtau_emfield(np,gl,l,flux,&(np[l].bs->dtauemfield[flux])); np[l].bs->coeffp0sum[flux]=0.0; for (dim=0; dim<nd; dim++){ find_linearization_coefficients_inner_node_emfield(np, gl, l, dim, flux, &(np[l].bs->coeffm1[dim][flux]), &(np[l].bs->coeffp0[dim][flux]), &(np[l].bs->coeffp1[dim][flux])); np[l].bs->coeffp0sum[flux]+=np[l].bs->coeffp0[dim][flux]; } } } } } #endif void update_prim_emfield_mem_in_zone(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_1,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_2,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_3,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); #ifdef _TSEMF_STORE_COEFFICIENTS sweep_with_1D_segments(np,gl,zone,&update_prim_emfield_mem_in_zone_4,SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); #endif } void add_convection_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){ long l,flux; fluxemfield_t Fm1h; for (l=ls; l!=_l_plus_one(_l_plus_one(le,gl,theta),gl,theta); l=_l_plus_one(l,gl,theta)){ find_Fstar_interface_emfield(np,gl,_al(gl,l,theta,-1),_al(gl,l,theta,+0),theta,Fm1h); for (flux=0; flux<nfe; flux++){ if (l!=_l_plus_one(le,gl,theta)) np[l].bs->Resemfield[flux]-=Fm1h[flux]; if (l!=ls) np[_al(gl,l,theta,-1)].bs->Resemfield[flux]+=Fm1h[flux]; } } } void add_source_residual_emfield(long theta, long ls, long le, np_t *np, gl_t *gl){ long l; long flux; fluxemfield_t S; if (theta==0) { for (l=ls; 
l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
      find_Sstar_emfield(np,gl,l,S);
      for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]-=S[flux];
    }
  }
}


/* add the convection and source contributions to the emfield residual
   along the 1D segment [ls,le] in sweep direction theta */
void update_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  add_convection_residual_emfield(theta,ls,le,np,gl);
  add_source_residual_emfield(theta,ls,le,np,gl);
}


/* zero the emfield residual on the segment [ls,le] and tally the
   effective-iteration counter effiter_R_emfield */
void initialize_residual_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){
  long l,flux;
  for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){
    for (flux=0; flux<nfe; flux++) np[l].bs->Resemfield[flux]=0.0e0;
    gl->effiter_R_emfield+=1.0e0/(double)gl->nn;
  }
}


/* update the emfield variables on one boundary node l: first try the
   straight boundary direction found by find_bdry_direc, then fall back
   to corner stencils over neighbouring inner nodes.
   NOTE(review): the dim1/dim2 declarations are guarded by _2DL while the
   2D corner loop is guarded by _2D -- confirm these macros are meant to
   differ, otherwise the corner code may be compiled without its locals */
void update_bdry_node_emfield(np_t *np, gl_t *gl, long l){
  long dim,l_C,l_B,l_A;
  long dimsgn;
  bool BDRYDIRECFOUND;
#ifdef _2DL
  long dim1;
  long dim2;
#endif
#ifdef _3D
  long dim3;
#endif
  bool UPDATED;
  assert(is_node_bdry(np[l],TYPELEVEL_EMFIELD));
  UPDATED=FALSE;
  BDRYDIRECFOUND=find_bdry_direc(np, gl, l, TYPELEVEL_EMFIELD, &dim, &dimsgn);
  if (is_node_link(np[l],TYPELEVEL_EMFIELD)) {
    // in case the boundary node is a link, Uemf has already been updated
    UPDATED=TRUE;
  }
  if (!UPDATED && BDRYDIRECFOUND){
    l_A=l;
    l_B=_al(gl,l,dim,dimsgn);
    l_C=_al(gl,l,dim,dimsgn*2);
    assert(is_node_inner(np[l_C],TYPELEVEL_EMFIELD));
    assert(is_node_inner(np[l_B],TYPELEVEL_EMFIELD));
    update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
    UPDATED=TRUE;
  }
  /* now, do the corners */
  if (!UPDATED) {
#ifdef _2D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        l_C=_all(gl,l,0,dim1*2,1,dim2*2);
        l_B=_all(gl,l,0,dim1,1,dim2);
        l_A=l;
        if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD)
             && !UPDATED){
          update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD);
          UPDATED=TRUE;
        }
      }
    }
#endif
#ifdef _3D
    for (dim1=-1; dim1<=1; dim1++){
      for (dim2=-1; dim2<=1; dim2++){
        for (dim3=-1; dim3<=1; dim3++){
          l_C=_al(gl, _al(gl, _al(gl,l,0,dim1*2), 1,dim2*2), 2,dim3*2);
          l_B=_al(gl, _al(gl, _al(gl,l,0,dim1), 
1,dim2), 2,dim3); l_A=l; if ( is_node_inner(np[l_B],TYPELEVEL_EMFIELD) && is_node_inner(np[l_C],TYPELEVEL_EMFIELD) && !UPDATED){ update_bdry_emfield(np,gl,l_A,l_B,l_C,dim,dimsgn,BDRYDIRECFOUND,TYPELEVEL_EMFIELD); UPDATED=TRUE; } } } } #endif } } void update_bdry_nodes_on_segment_emfield(np_t *np, gl_t *gl, long theta, long ls, long le){ long l; for (l=ls; l!=_l_plus_one(le,gl,theta); l=_l_plus_one(l,gl,theta)){ if (is_node_bdry(np[l],TYPELEVEL_EMFIELD)){ thread_lock_node_set(np,l,THREADTYPE_ZONE); update_bdry_node_emfield(np, gl, l); thread_lock_node_unset(np,l,THREADTYPE_ZONE); } } } void update_bdry_nodes_emfield(np_t *np, gl_t *gl, zone_t zone){ sweep_with_1D_segments(np, gl, zone, &update_bdry_nodes_on_segment_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_valid,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); } void find_residual_emfield(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; /* now, let's find the residual and store it in bs->dUstaremfield*/ sweep_with_1D_segments(np,gl,zone,&initialize_residual_emfield, SWEEPTYPE_I, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_LIGHT,GRIDLEVEL_ONE); sweep_with_1D_segments(np,gl,zone,&update_residual_emfield, SWEEPTYPE_IJK, TYPELEVEL_EMFIELD,&is_node_inner,SEGMENTWORK_HEAVY,GRIDLEVEL_ONE); /* let's find max residual, and put it in gl*/ for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { np[_ai(gl,i,j,k)].bs->_xi_emfield=_xi_emfield(np[_ai(gl,i,j,k)],gl,np[_ai(gl,i,j,k)].bs->Resemfield); } } } void find_ximax_emfield(np_t *np, gl_t *gl, zone_t zone){ long i,j,k; gl->ximax_emfield=0.0e0; for_ijk(zone,is,js,ks,ie,je,ke){ if (is_node_inner(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD) && np[_ai(gl,i,j,k)].bs->_xi_emfield>=gl->ximax_emfield) { gl->ximax_emfield=np[_ai(gl,i,j,k)].bs->_xi_emfield; gl->i_ximax_emfield=i; gl->j_ximax_emfield=j; gl->k_ximax_emfield=k; } } } void read_UpdateEMField_arguments(char **argum, SOAP_codex_t *codex, gl_t *gl){ SOAP_substitute_all_argums(argum, codex); 
gl->Lc=SOAP_get_argum_double(codex,*argum,0); gl->relaxEMF=SOAP_get_argum_double(codex,*argum,1); gl->numsubiter_tsemf=4; /* make the default number of subiterations equal to 4 */ gl->tsemfmethod=TSEMF_DEFAULT; if (gl->Lc<=0.0) fatal_error("The length scale Lc must be positive when calling UpdateEMField()."); if (gl->relaxEMF<=0.0) fatal_error("The relaxation factor relaxEMF must be positive when calling UpdateEMField()."); if (gl->relaxEMF>2.0) fatal_error("The relaxation factor relaxEMF must be less than 2 when calling UpdateEMField()."); if (gl->numsubiter_tsemf<=0.0) fatal_error("The number of subiterations subiter_tsemf must be positive when calling UpdateEMField()."); #ifdef UNSTEADY gl->dt=SOAP_get_argum_double(codex,*argum,2); if (gl->dt<=0.0) fatal_error("The time step dt must be positive when calling UpdateEMField()."); if (SOAP_number_argums(*argum)>3) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,3); if (SOAP_number_argums(*argum)>4){ if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi) gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,4); else fatal_error("UpdateEMField accepts the number of subiterations as a 5th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSMEF_IMAFk, TSMEF_IMAFi is specified."); } #else if (SOAP_number_argums(*argum)>2) gl->tsemfmethod=SOAP_get_argum_long(codex,*argum,2); if (SOAP_number_argums(*argum)>3) { if (gl->tsemfmethod==TSEMF_SOR || gl->tsemfmethod==TSEMF_SOR2 || gl->tsemfmethod==TSEMF_ADIIMAF || gl->tsemfmethod==TSEMF_IMAF || gl->tsemfmethod==TSEMF_IMAFk || gl->tsemfmethod==TSEMF_IMAFi) gl->numsubiter_tsemf=SOAP_get_argum_long(codex,*argum,3); else fatal_error("UpdateEMField accepts the number of subiterations as a 4th argument only if TSEMF_SOR, TSEMF_SOR2, TSEMF_ADIIMAF, TSEMF_IMAF, TSEMF_IMAFk, TSMEF_IMAFi is specified."); } #endif } void 
/* solve the tridiagonal systems arising in the emfield pseudo-time step
   (Thomas algorithm); under DISTMPI, unless gl->EM_MPIBDRY_EXPLICIT is
   set, the forward elimination / back substitution is chained across
   ranks with point-to-point MPI_Send/MPI_Recv at the segment endpoints
   so the factorization stays globally consistent */
solve_TDMA_emfield(np_t *np, gl_t *gl, long theta, long ls, long le, int TYPELEVEL,
                   EXM_tdmaline_t *tdma, long numlines){
#ifdef DISTMPI
  long line,cnt,i,j,k,i_s,j_s,k_s;
  double tmp;
  MPI_Status MPI_Status1;
  if (gl->EM_MPIBDRY_EXPLICIT){
    EXM_solve_TDMA(tdma, numlines);
  } else {
    /* if ls node is inner node, need to obtain the tdma[0] from another process that owns ls */
    if (is_node_inner(np[ls],TYPELEVEL)){
      find_ijk_from_l(gl, ls, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[0].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS)
        fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[0].val[0]==0.0);
    }
    /* forward elimination of the subdiagonal (val[0]); val[3] is the RHS */
    for (line=0; line<numlines-1; line++){
      assert(tdma[line].val[1]!=0.0);
      tmp = -(tdma[line+1].val[0] / tdma[line].val[1]);
      for (cnt = 1; cnt <= 2; cnt++) tdma[line+1].val[cnt - 1] += tdma[line].val[cnt] * tmp;
      tdma[line+1].val[3] += tdma[line].val[3] * tmp;
      tdma[line+1].val[0] = 0.0;
    }
    /* if le node is inner node, need to send the tdma[numlines-2] to another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      find_ijk_from_l(gl, _l_minus_one(le,gl,theta), &i_s, &j_s, &k_s);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Send(tdma[numlines-2].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS)
        fatal_error("MPI_Send problem in solve_TDMA_emfield");
    }
    /* if le node is inner node, need to obtain the tdma[numlines-1] from another process that owns le */
    if (is_node_inner(np[le],TYPELEVEL)){
      find_ijk_from_l(gl, le, &i, &j, &k);
      assert(_ai_all(gl,i,j,k)<LONG_MAX);
      if (MPI_Recv(tdma[numlines-1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i,j,k),MPI_COMM_WORLD,&MPI_Status1)!=MPI_SUCCESS)
        fatal_error("MPI_Recv problem in solve_TDMA_emfield");
      assert(tdma[numlines-1].val[2]==0.0);
    }
    /* back substitution, normalizing the diagonal (val[1]) to 1 */
    for (line=numlines-1; line>0; line--){
      assert(tdma[line].val[1]!=0.0);
      tdma[line].val[3] /= tdma[line].val[1];
      tdma[line].val[1] = 1.0; 
tdma[line-1].val[3] -= tdma[line].val[3] * tdma[line-1].val[2]; tdma[line-1].val[2] = 0.0; } assert(tdma[0].val[1]!=0.0); tdma[0].val[3] /= tdma[0].val[1]; tdma[0].val[1] = 1.0; /* if ls node is inner node, need to send the tdma[1] to another process that owns ls */ if (is_node_inner(np[ls],TYPELEVEL)){ find_ijk_from_l(gl, ls, &i, &j, &k); find_ijk_from_l(gl, _l_plus_one(ls,gl,theta), &i_s, &j_s, &k_s); assert(_ai_all(gl,i,j,k)<LONG_MAX); if (MPI_Send(tdma[1].val,4,MPI_DOUBLE,_node_rank(gl,i,j,k),_ai_all(gl,i_s,j_s,k_s),MPI_COMM_WORLD)!=MPI_SUCCESS) fatal_error("MPI_Send problem in solve_TDMA_emfield"); } } #else EXM_solve_TDMA(tdma, numlines); #endif } #endif//EMFIELD
// ===== data.h (second concatenated source file: xgboost header) =====
/*! * Copyright (c) 2015-2021 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <dmlc/serializer.h> #include <xgboost/base.h> #include <xgboost/host_device_vector.h> #include <xgboost/linalg.h> #include <xgboost/span.h> #include <xgboost/string_view.h> #include <algorithm> #include <memory> #include <numeric> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4, kStr = 5 }; enum class FeatureType : uint8_t { kNumerical, kCategorical }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of data fields in MetaInfo */ static constexpr uint64_t kNumField = 12; /*! \brief number of rows in the data */ uint64_t num_row_{0}; // NOLINT /*! \brief number of columns in the data */ uint64_t num_col_{0}; // NOLINT /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; // NOLINT /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; // NOLINT /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; // NOLINT /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; // NOLINT /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ linalg::Tensor<float, 2> base_margin_; // NOLINT /*! * \brief lower bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT /*! 
* \brief upper bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT /*! * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q" */ std::vector<std::string> feature_type_names; /*! * \brief Name for each feature. */ std::vector<std::string> feature_names; /* * \brief Type of each feature. Automatically set when feature_type_names is specifed. */ HostDeviceVector<FeatureType> feature_types; /* * \brief Weight of each feature, used to define the probability of each feature being * selected when using column sampling. */ HostDeviceVector<float> feature_weights; /*! \brief default constructor */ MetaInfo() = default; MetaInfo(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo const& that) = delete; /*! * \brief Validate all metainfo. */ void Validate(int32_t device) const; MetaInfo Slice(common::Span<int32_t const> ridxs) const; /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! 
* \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. * \param interface_str String representation of json format array interface. */ void SetInfo(StringView key, StringView interface_str); void GetInfo(char const* key, bst_ulong* out_len, DataType dtype, const void** out_dptr) const; void SetFeatureInfo(const char *key, const char **info, const bst_ulong size); void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const; /* * \brief Extend with other MetaInfo. * * \param that The other MetaInfo object. * * \param accumulate_rows Whether rows need to be accumulated in this function. If * client code knows number of rows in advance, set this * parameter to false. * \param check_column Whether the extend method should check the consistency of * columns. */ void Extend(MetaInfo const& that, bool accumulate_rows, bool check_column); private: void SetInfoFromHost(StringView key, Json arr); void SetInfoFromCUDA(StringView key, Json arr); /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. */ XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! 
\brief reversely compare feature values */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  // Two entries are equal iff both the feature index and the value match.
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id {-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Hessian, used for sketching with future approx implementation. */
  common::Span<float> hess;
  /*! \brief Whether should DMatrix regenerate the batch.  Only used for GHistIndex. */
  bool regen {false};

  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin)
      : gpu_id{device}, max_bin{max_bin} {}
  /**
   * \brief Get batch with sketch weighted by hessian.  The batch will be regenerated
   *        if the span is changed, so caller should keep the span for each iteration.
   */
  BatchParam(int32_t device, int32_t max_bin, common::Span<float> hessian,
             bool regenerate = false)
      : gpu_id{device}, max_bin{max_bin}, hess{hessian}, regen{regenerate} {}

  // Params differ when device/bin count differ, or (when a hessian is
  // supplied) when the hessian spans point at different buffers.
  bool operator!=(const BatchParam& other) const {
    if (hess.empty() && other.hess.empty()) {
      return gpu_id != other.gpu_id || max_bin != other.max_bin;
    }
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           hess.data() != other.hess.data();
  }
};

// Lightweight read-only view over the host-side CSR buffers of a SparsePage.
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;

  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;

  // Row i spans data[offset[i], offset[i+1]).
  Inst operator[](size_t i) const {
    auto size = *(offset.data() + i + 1) - *(offset.data() + i);
    return {data.data() + *(offset.data() + i),
            static_cast<Inst::index_type>(size)};
  }

  // Number of rows; offset has one sentinel element when non-empty.
  size_t Size() const { return offset.size() == 0 ? 0 : offset.size() - 1; }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! 
\brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid {0}; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; HostSparsePageView GetView() const { return {offset.ConstHostSpan(), data.ConstHostSpan()}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return Number of instances in the page. */ inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. */ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); dmlc::OMPException exc; #pragma omp parallel for schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { exc.Run([&]() { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } }); } exc.Rethrow(); } /** * \brief Pushes external data batch onto this page * * \tparam AdapterBatchT * \param batch * \param missing * \param nthread * * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns. */ template <typename AdapterBatchT> uint64_t Push(const AdapterBatchT& batch, float missing, int nthread); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! 
* \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! * \brief Default constructor. * * This is used in the external memory case. An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); EllpackPage(EllpackPage&& that); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. 
*/ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; class GHistIndexMatrix; template<typename T> class BatchIteratorImpl { public: using iterator_category = std::forward_iterator_tag; // NOLINT virtual ~BatchIteratorImpl() = default; virtual const T& operator*() const = 0; virtual BatchIteratorImpl& operator++() = 0; virtual bool AtEnd() const = 0; virtual std::shared_ptr<T const> Page() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; // NOLINT explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } explicit BatchIterator(std::shared_ptr<BatchIteratorImpl<T>> impl) { impl_ = impl; } BatchIterator &operator++() { CHECK(impl_ != nullptr); ++(*impl_); return *this; } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator&) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } std::shared_ptr<T const> Page() const { return impl_->Page(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {} BatchIterator<T> begin() { return begin_iter_; } // NOLINT BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT private: BatchIterator<T> begin_iter_; }; struct XGBAPIThreadLocalEntry; /*! * \brief Internal data structured used by XGBoost during training. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! 
\brief meta information of the dataset */ virtual MetaInfo& Info() = 0; virtual void SetInfo(const char *key, const void *dptr, DataType dtype, size_t num) { this->Info().SetInfo(key, dptr, dtype, num); } virtual void SetInfo(const char* key, std::string const& interface_str) { this->Info().SetInfo(key, StringView{interface_str}); } /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /*! \brief Get thread local memory for returning data from DMatrix. */ XGBAPIThreadLocalEntry& GetThreadLocal() const; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); template <typename T> bool PageExists() const; // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief virtual destructor */ virtual ~DMatrix(); /*! \brief Whether the matrix is dense. */ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto"); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. * \param [in,out] adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. 
* \param cache_prefix (Optional) The cache prefix for external memory. * \param page_size (Optional) Size of the page. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = ""); /** * \brief Create a new Quantile based DMatrix used for histogram based algorithm. * * \tparam DataIterHandle External iterator type, defined in C API. * \tparam DMatrixHandle DMatrix handle, defined in C API. * \tparam DataIterResetCallback Callback for reset, prototype defined in C API. * \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API. * * \param iter External data iterator * \param proxy A hanlde to ProxyDMatrix * \param reset Callback for reset * \param next Callback for next * \param missing Value that should be treated as missing. * \param nthread number of threads used for initialization. * \param max_bin Maximum number of bins. * * \return A created quantile based DMatrix. */ template <typename DataIterHandle, typename DMatrixHandle, typename DataIterResetCallback, typename XGDMatrixCallbackNext> static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy, DataIterResetCallback *reset, XGDMatrixCallbackNext *next, float missing, int nthread, int max_bin); /** * \brief Create an external memory DMatrix with callbacks. * * \tparam DataIterHandle External iterator type, defined in C API. * \tparam DMatrixHandle DMatrix handle, defined in C API. * \tparam DataIterResetCallback Callback for reset, prototype defined in C API. * \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API. * * \param iter External data iterator * \param proxy A hanlde to ProxyDMatrix * \param reset Callback for reset * \param next Callback for next * \param missing Value that should be treated as missing. * \param nthread number of threads used for initialization. * \param cache Prefix of cache file path. 
* * \return A created external memory DMatrix. */ template <typename DataIterHandle, typename DMatrixHandle, typename DataIterResetCallback, typename XGDMatrixCallbackNext> static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy, DataIterResetCallback *reset, XGDMatrixCallbackNext *next, float missing, int32_t nthread, std::string cache); virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0; /*! \brief Number of rows per page in external memory. Approximately 100MB per page for * dataset with 100 features. */ static const size_t kPageSize = 32UL << 12UL; protected: virtual BatchSet<SparsePage> GetRowBatches() = 0; virtual BatchSet<CSCPage> GetColumnBatches() = 0; virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0; virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0; virtual BatchSet<GHistIndexMatrix> GetGradientIndex(const BatchParam& param) = 0; virtual bool EllpackExists() const = 0; virtual bool SparsePageExists() const = 0; }; template<> inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) { return GetRowBatches(); } template<> inline bool DMatrix::PageExists<EllpackPage>() const { return this->EllpackExists(); } template<> inline bool DMatrix::PageExists<SparsePage>() const { return this->SparsePageExists(); } template<> inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) { return GetColumnBatches(); } template<> inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) { return GetSortedColumnBatches(); } template<> inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) { return GetEllpackBatches(param); } template<> inline BatchSet<GHistIndexMatrix> DMatrix::GetBatches(const BatchParam& param) { return GetGradientIndex(param); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); namespace serializer { template <> struct Handler<xgboost::Entry> { inline static void Write(Stream* strm, const xgboost::Entry& data) 
{ strm->Write(data.index); strm->Write(data.fvalue); } inline static bool Read(Stream* strm, xgboost::Entry* data) { return strm->Read(&data->index) && strm->Read(&data->fvalue); } }; } // namespace serializer } // namespace dmlc #endif // XGBOOST_DATA_H_
/* ===== file: cvAdvDiff_bnd_omp.c ===== */
/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds and Ting Yan @ SMU * Based on cvAdvDiff_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * solved using CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the SUNBAND linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * * Optionally, we can set the number of threads from environment * variable or command line. 
To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value or the number of threads from the * environment value, run without arguments: * % ./cvAdvDiff_bnd_omp * The environment variable can be over-ridden with a command line * argument specifying the number of threads to use, e.g: * % ./cvAdvDiff_bnd_omp 5 * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> /* Header files with a description of contents */ #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */ #include <nvector/nvector_openmp.h> /* serial N_Vector types, fcts., macros */ #include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */ #include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants */ #define XMAX RCONST(2.0) /* domain boundaries */ #define YMAX RCONST(1.0) #define MX 10 /* mesh dimensions */ #define MY 5 #define NEQ MX*MY /* number of equations */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* User-defined vector access macro IJth */ /* IJth is defined in order to isolate the translation from the mathematical 2-dimensional structure of the dependent variable vector to the underlying 1-dimensional storage. IJth(vdata,i,j) references the element in the vdata array for u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY. 
The vdata array is obtained via the macro call vdata = NV_DATA_S(v), where v is an N_Vector. The variables are ordered by the y index j, then by the x index i. */ #define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY]) /* Type : UserData (contains grid constants) */ typedef struct { realtype dx, dy, hdcoef, hacoef, vdcoef; int nthreads; } *UserData; /* Private Helper Functions */ static void SetIC(N_Vector u, UserData data); static void PrintHeader(realtype reltol, realtype abstol, realtype umax); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *returnvalue, const char *funcname, int opt); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char *argv[]) { realtype dx, dy, reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNMatrix A; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; int num_threads; u = NULL; data = NULL; A = NULL; LS = NULL; cvode_mem = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = strtol(argv[1], NULL, 0); /* Create an OpenMP vector */ u = N_VNew_OpenMP(NEQ, num_threads); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; data = (UserData) malloc(sizeof *data); /* Allocate data memory */ if(check_retval((void *)data, "malloc", 2)) return(1); dx = data->dx = XMAX/(MX+1); /* Set grid 
coefficients in data */ dy = data->dy = YMAX/(MY+1); data->hdcoef = ONE/(dx*dx); data->hacoef = HALF/(TWO*dx); data->vdcoef = ONE/(dy*dy); data->nthreads = num_threads; SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula */ cvode_mem = CVodeCreate(CV_BDF); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the inital time T0, and * the initial dependent variable vector u. */ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create banded SUNMatrix for use in linear solves -- since this will be factored, set the storage bandwidth to be the sum of upper and lower bandwidths */ A = SUNBandMatrix(NEQ, MY, MY); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); /* Create banded SUNLinearSolver object for use by CVode */ LS = SUNLinSol_Band(u, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */ retval = CVodeSetLinearSolver(cvode_mem, LS, A); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the user-supplied Jacobian routine Jac */ retval = CVodeSetJacFn(cvode_mem, Jac); if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = 
CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ printf("num_threads = %i\n\n", num_threads); N_VDestroy_OpenMP(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ SUNLinSolFree(LS); /* Free the linear solver memory */ SUNMatDestroy(A); /* Free the matrix memory */ free(data); /* Free the user data */ return(0); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u,N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff; realtype *udata, *dudata; int i, j; UserData data; udata = NV_DATA_OMP(u); dudata = NV_DATA_OMP(udot); /* Extract needed constants from data */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; /* Loop over all grid points. */ #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { /* Extract u at x_i, y_j and four neighboring points */ uij = IJth(udata, i, j); udn = (j == 1) ? ZERO : IJth(udata, i, j-1); uup = (j == MY) ? ZERO : IJth(udata, i, j+1); ult = (i == 1) ? ZERO : IJth(udata, i-1, j); urt = (i == MX) ? ZERO : IJth(udata, i+1, j); /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); IJth(dudata, i, j) = hdiff + hadv + vdiff; } } return(0); } /* Jacobian routine. Compute J(t,u). 
*/ static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { sunindextype i, j, k; realtype *kthCol, hordc, horac, verdc; UserData data; /* The components of f = udot that depend on u(i,j) are f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2) df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1) df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX) df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1) df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY) */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { k = j-1 + (i-1)*MY; kthCol = SUNBandMatrix_Column(J,k); /* set the kth column of J */ SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc); if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac; if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac; if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc; if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc; } } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { int i, j; realtype x, y, dx, dy; realtype *udata; /* Extract needed constants from data */ dx = data->dx; dy = data->dy; /* Set pointer to data array in vector u. 
*/ udata = NV_DATA_OMP(u); /* Load initial profile into u vector */ #pragma omp parallel for default(shared) private(j, i, y, x) for (j=1; j <= MY; j++) { y = j*dy; for (i=1; i <= MX; i++) { x = i*dx; IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*SUNRexp(FIVE*x*y); } } } /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", MX, MY); printf("Total system size = %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { int retval; long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS; retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, 
"CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetNumJacEvals(cvode_mem, &nje); check_retval(&retval, "CVodeGetNumJacEvals", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics:\n"); printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n", nst, nfe, nsetups, nfeLS, nje); printf("nni = %-6ld ncfn = %-6ld netf = %ld\n", nni, ncfn, netf); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns an integer value so check if retval < 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, const char *funcname, int opt) { int *retval; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && returnvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && returnvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
// ===== file: mixedulm_linear_solver.h =====
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_MIXEDULM_SOLVER_H_INCLUDED ) #define KRATOS_MIXEDULM_SOLVER_H_INCLUDED // System includes #include <string> #include <iostream> #include <sstream> #include <cstddef> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "linear_solvers/reorderer.h" #include "linear_solvers/iterative_solver.h" #include "utilities/openmp_utils.h" #include "contact_structural_mechanics_application_variables.h" #include "utilities/sparse_matrix_multiplication_utility.h" #include "custom_utilities/logging_settings.hpp" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class MixedULMLinearSolver * @ingroup ContactStructuralMechanicsApplication * @brief This solver is designed for the solution of mixed U-LM problems (this solver in particular is optimized for dual LM, to avoid the resolution). 
* @details It uses a block structure diving the matrix in UU LMLM ULM LMU blocks * and uses "standard" linear solvers for the different blocks as well as a GMRES for the outer part * @author Vicente Mataix Ferrandiz */ template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>, class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> > class MixedULMLinearSolver : public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType> { public: ///@} ///@name Enums ///@{ /// This enum is used to identify each index whick kind is enum class BlockType { OTHER, MASTER, SLAVE_INACTIVE, SLAVE_ACTIVE, LM_INACTIVE, LM_ACTIVE }; ///@name Type Definitions ///@{ /// The flag that indicates if the blocks are allocated KRATOS_DEFINE_LOCAL_FLAG( BLOCKS_ARE_ALLOCATED ); /// The flag that indicates if the solution is initialized KRATOS_DEFINE_LOCAL_FLAG( IS_INITIALIZED ); /// Pointer definition of MixedULMLinearSolver KRATOS_CLASS_POINTER_DEFINITION (MixedULMLinearSolver); /// The base class corresponds to the an iterative solver typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType; /// The base class for the linear solver typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> LinearSolverType; /// The pointer to a linear solver typedef typename LinearSolverType::Pointer LinearSolverPointerType; /// The sparse matrix type typedef typename TSparseSpaceType::MatrixType SparseMatrixType; /// The vector type typedef typename TSparseSpaceType::VectorType VectorType; /// The dense matrix type typedef typename TDenseSpaceType::MatrixType DenseMatrixType; /// The dense vector type typedef typename TDenseSpaceType::VectorType DenseVectorType; /// The node type typedef Node<3> NodeType; /// The definition of the dof type typedef typename ModelPart::DofType DofType; /// The array containing the dofs typedef typename 
ModelPart::DofsArrayType DofsArrayType; /// An array of conditions typedef ModelPart::ConditionsContainerType ConditionsArrayType; /// An array of nodes typedef ModelPart::NodesContainerType NodesArrayType; /// The size type typedef std::size_t SizeType; /// The index type typedef std::size_t IndexType; /// A vector of indexes typedef DenseVector<IndexType> IndexVectorType; /// A vector of types typedef DenseVector<BlockType> BlockTypeVectorType; static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param pSolverDispBlock The linear solver used for the displacement block * @param MaxTolerance The maximal tolrance considered * @param MaxIterationNumber The maximal number of iterations */ MixedULMLinearSolver ( LinearSolverPointerType pSolverDispBlock, const double MaxTolerance, const std::size_t MaxIterationNumber ) : BaseType (MaxTolerance, MaxIterationNumber), mpSolverDispBlock(pSolverDispBlock) { // Initializing the remaining variables mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mOptions.Set(IS_INITIALIZED, false); } /** * @brief Second constructor, it uses a Kratos parameters as input instead of direct input * @param pSolverDispBlock The linear solver used for the displacement block * @param ThisParameters The configuration parameters considered */ MixedULMLinearSolver( LinearSolverPointerType pSolverDispBlock, Parameters ThisParameters = Parameters(R"({})") ): BaseType (), mpSolverDispBlock(pSolverDispBlock) { KRATOS_TRY // Now validate agains defaults -- this also ensures no type mismatch Parameters default_parameters = GetDefaultParameters(); ThisParameters.ValidateAndAssignDefaults(default_parameters); // Initializing the remaining variables this->SetTolerance( ThisParameters["tolerance"].GetDouble() ); this->SetMaxIterationsNumber( ThisParameters["max_iteration_number"].GetInt() ); mEchoLevel = ThisParameters["echo_level"].GetInt(); 
mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mOptions.Set(IS_INITIALIZED, false); KRATOS_CATCH("") } /// Copy constructor. MixedULMLinearSolver (const MixedULMLinearSolver& rOther) : BaseType(rOther), mpSolverDispBlock(rOther.mpSolverDispBlock), mOptions(rOther.mOptions), mMasterIndices(rOther.mMasterIndices), mSlaveInactiveIndices(rOther.mSlaveInactiveIndices), mSlaveActiveIndices(rOther.mSlaveActiveIndices), mLMInactiveIndices(rOther.mLMInactiveIndices), mLMActiveIndices(rOther.mLMActiveIndices), mOtherIndices(rOther.mOtherIndices), mGlobalToLocalIndexing(rOther.mGlobalToLocalIndexing), mWhichBlockType(rOther.mWhichBlockType), mKDispModified(rOther.mKDispModified), mKLMAModified(rOther.mKLMAModified), mKLMIModified(rOther.mKLMIModified), mKSAN(rOther.mKSAN), mKSAM(rOther.mKSAM), mKSASI(rOther.mKSASI), mKSASA(rOther.mKSASA), mPOperator(rOther.mPOperator), mCOperator(rOther.mCOperator), mResidualLMActive(rOther.mResidualLMActive), mResidualLMInactive(rOther.mResidualLMInactive), mResidualDisp(rOther.mResidualDisp), mLMActive(rOther.mLMActive), mLMInactive(rOther.mLMInactive), mDisp(rOther.mDisp), mEchoLevel(rOther.mEchoLevel), mFileCreated(rOther.mFileCreated) { } /// Destructor. ~MixedULMLinearSolver() override {} ///@} ///@name Operators ///@{ /// Assignment operator. MixedULMLinearSolver& operator= (const MixedULMLinearSolver& Other) { return *this; } ///@} ///@name Operations ///@{ /** * @brief This function is designed to be called as few times as possible. It creates the data structures * that only depend on the connectivity of the matrix (and not on its coefficients) * @details So that the memory can be allocated once and expensive operations can be done only when strictly * needed * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. 
*/ void Initialize ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { if (mOptions.Is(BLOCKS_ARE_ALLOCATED)) { mpSolverDispBlock->Initialize(mKDispModified, mDisp, mResidualDisp); mOptions.Set(IS_INITIALIZED, true); } else KRATOS_DETAIL("MixedULM Initialize") << "Linear solver intialization is deferred to the moment at which blocks are available" << std::endl; } /** * @brief This function is designed to be called every time the coefficients change in the system * that is, normally at the beginning of each solve. * @details For example if we are implementing a direct solver, this is the place to do the factorization * so that then the backward substitution can be performed effectively more than once * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ void InitializeSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // Copy to local matrices if (mOptions.IsNot(BLOCKS_ARE_ALLOCATED)) { FillBlockMatrices (true, rA, rX, rB); mOptions.Set(BLOCKS_ARE_ALLOCATED, true); } else { FillBlockMatrices (false, rA, rX, rB); mOptions.Set(BLOCKS_ARE_ALLOCATED, true); } if(mOptions.IsNot(IS_INITIALIZED)) this->Initialize(rA,rX,rB); mpSolverDispBlock->InitializeSolutionStep(mKDispModified, mDisp, mResidualDisp); } /** * @brief This function actually performs the solution work, eventually taking advantage of what was done before in the * @details Initialize and InitializeSolutionStep functions. * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. 
*/ void PerformSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // Auxiliar size const SizeType lm_active_size = mLMActiveIndices.size(); const SizeType lm_inactive_size = mLMInactiveIndices.size(); const SizeType total_disp_size = mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + mSlaveActiveIndices.size(); // Get the u and lm residuals GetUPart (rB, mResidualDisp); // Solve u block if (mDisp.size() != total_disp_size) mDisp.resize(total_disp_size, false); mpSolverDispBlock->Solve (mKDispModified, mDisp, mResidualDisp); // Write back solution SetUPart(rX, mDisp); // Solve LM if (lm_active_size > 0) { // Now we compute the residual of the LM GetLMAPart (rB, mResidualLMActive); // LM = D⁻1*rLM if (mLMActive.size() != lm_active_size) mLMActive.resize(lm_active_size, false); TSparseSpaceType::Mult (mKLMAModified, mResidualLMActive, mLMActive); // Write back solution SetLMAPart(rX, mLMActive); } if (lm_inactive_size > 0) { // Now we compute the residual of the LM GetLMIPart (rB, mResidualLMInactive); // LM = D⁻1*rLM if (mLMInactive.size() != lm_inactive_size) mLMInactive.resize(lm_inactive_size, false); TSparseSpaceType::Mult (mKLMIModified, mResidualLMInactive, mLMInactive); // Write back solution SetLMIPart(rX, mLMInactive); } } /** * @brief This function is designed to be called at the end of the solve step. * @details For example this is the place to remove any data that we do not want to save for later * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ void FinalizeSolutionStep ( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { mpSolverDispBlock->FinalizeSolutionStep(mKDispModified, mDisp, mResidualDisp); } /** * @brief This function is designed to clean up all internal data in the solver. * @details Clear is designed to leave the solver object as if newly created. 
After a clear a new Initialize is needed */ void Clear() override { mOptions.Set(BLOCKS_ARE_ALLOCATED, false); mpSolverDispBlock->Clear(); // We clear the matrixes and vectors mKDispModified.clear(); /// The modified displacement block mKLMAModified.clear(); /// The modified active LM block (diagonal) mKLMIModified.clear(); /// The modified inaactive LM block (diagonal) mKSAN.clear(); /// The slave active-displacement block mKSAM.clear(); /// The active slave-master block mKSASI.clear(); /// The active slave-inactive slave block mKSASA.clear(); /// The active slave-slave active block mPOperator.clear(); /// The operator used for the master blocks mCOperator.clear(); /// The operator used for the active slave block mResidualLMActive.clear(); /// The residual corresponding the active LM mResidualLMInactive.clear(); /// The residual corresponding the inactive LM mResidualDisp.clear(); /// The residual of the displacements mLMActive.clear(); /// The solution of the active LM mLMInactive.clear(); /// The solution of the inactive LM mDisp.clear(); /// The solution of the displacement mOptions.Set(IS_INITIALIZED, false); } /** * @brief Normal solve method. * @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rVectorx is also th initial guess for iterative methods. * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. 
*/ bool Solve( SparseMatrixType& rA, VectorType& rX, VectorType& rB ) override { // We print the system before condensate (if needed) if (mEchoLevel == 2) { //if it is needed to print the debug info KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl; } else if (mEchoLevel == 3) { //if it is needed to print the debug info KRATOS_INFO("LHS BEFORE CONDENSATION") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("RHS BEFORE CONDENSATION") << "RHS = " << rB << std::endl; } else if (mEchoLevel >= 4) { //print to matrix market file const std::string matrix_market_name = "before_condensation_A_" + std::to_string(mFileCreated) + ".mm"; TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), rA, false); const std::string matrix_market_vectname = "before_condensation_b_" + std::to_string(mFileCreated) + ".mm.rhs"; TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), rB); } if (mOptions.IsNot(IS_INITIALIZED)) this->Initialize (rA,rX,rB); this->InitializeSolutionStep (rA,rX,rB); this->PerformSolutionStep (rA,rX,rB); this->FinalizeSolutionStep (rA,rX,rB); // We print the resulting system (if needed) if (mEchoLevel == 2) { //if it is needed to print the debug info KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl; KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl; } else if (mEchoLevel == 3) { //if it is needed to print the debug info KRATOS_INFO("LHS") << "SystemMatrix = " << mKDispModified << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << mDisp << std::endl; KRATOS_INFO("RHS") << "RHS = " << mResidualDisp << std::endl; } else if (mEchoLevel >= 4) { //print to matrix market file const std::string matrix_market_name = "A_" + std::to_string(mFileCreated) + ".mm"; TSparseSpaceType::WriteMatrixMarketMatrix(matrix_market_name.c_str(), mKDispModified, false); const std::string matrix_market_vectname = "b_" + std::to_string(mFileCreated) + ".mm.rhs"; 
TSparseSpaceType::WriteMatrixMarketVector(matrix_market_vectname.c_str(), mResidualDisp); mFileCreated++; } return false; } /** * @brief Multi solve method for solving a set of linear systems with same coefficient matrix. * @details Solves the linear system Ax=b and puts the result on SystemVector& rX. rVectorx is also th initial guess for iterative methods. * @param rA System matrix * @param rX Solution vector. it's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. */ bool Solve ( SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB ) override { return false; } /** * @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * @details Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. * This function tells if the solver requires such data */ bool AdditionalPhysicalDataIsNeeded() override { return true; } /** * @brief Some solvers may require a minimum degree of knowledge of the structure of the matrix. * @details To make an example when solving a mixed u-p problem, it is important to identify the row associated to v and p. Another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers which require knowledge on the spatial position of the nodes associated to a given dof. This function is the place to eventually provide such data * @param rA System matrix * @param rX Solution vector. It's also the initial guess for iterative linear solvers. * @param rB Right hand side vector. 
*/
    void ProvideAdditionalData (
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB,
        DofsArrayType& rDofSet,
        ModelPart& rModelPart
        ) override
    {
        // Allocating auxiliar parameters
        IndexType node_id;

        // Count LM dofs
        SizeType n_lm_inactive_dofs = 0, n_lm_active_dofs = 0;
        SizeType n_master_dofs = 0;
        SizeType n_slave_inactive_dofs = 0, n_slave_active_dofs = 0;
        SizeType tot_active_dofs = 0;

        // First pass: count how many DoFs fall in each block.
        // We separate if we consider a block builder and solver or an elimination builder and solver
        if (rModelPart.IsNot(TO_SPLIT)) {
            // In case of block builder and solver: only DoFs whose equation id is
            // inside the system matrix are counted (fixed DoFs are numbered past rA.size1())
            for (auto& i_dof : rDofSet) {
                node_id = i_dof.Id();
                const NodeType& node = rModelPart.GetNode(node_id);
                if (i_dof.EquationId() < rA.size1()) {
                    tot_active_dofs++;
                    if (IsLMDof(i_dof)) {
                        if (node.Is(ACTIVE))
                            n_lm_active_dofs++;
                        else
                            n_lm_inactive_dofs++;
                    } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
                        if (node.Is(MASTER)) {
                            n_master_dofs++;
                        } else if (node.Is(SLAVE)) {
                            if (node.Is(ACTIVE))
                                n_slave_active_dofs++;
                            else
                                n_slave_inactive_dofs++;
                        }
                    }
                }
            }
        } else {
            // In case of elimination builder and solver: every DoF in the set is free,
            // so no equation-id bound check is needed
            for (auto& i_dof : rDofSet) {
                node_id = i_dof.Id();
                const NodeType& node = rModelPart.GetNode(node_id);
                tot_active_dofs++;
                if (IsLMDof(i_dof)) {
                    if (node.Is(ACTIVE))
                        n_lm_active_dofs++;
                    else
                        n_lm_inactive_dofs++;
                } else if (node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
                    if (node.Is(MASTER)) {
                        n_master_dofs++;
                    } else if (node.Is(SLAVE)) {
                        if (node.Is(ACTIVE))
                            n_slave_active_dofs++;
                        else
                            n_slave_inactive_dofs++;
                    }
                }
            }
        }

        KRATOS_ERROR_IF(tot_active_dofs != rA.size1()) << "Total system size does not coincide with the free dof map: " << tot_active_dofs << " vs " << rA.size1() << std::endl;

        // Resize arrays as needed
        if (mMasterIndices.size() != n_master_dofs)
            mMasterIndices.resize (n_master_dofs,false);
        if (mSlaveInactiveIndices.size() != n_slave_inactive_dofs)
            mSlaveInactiveIndices.resize (n_slave_inactive_dofs,false);
        if (mSlaveActiveIndices.size() != n_slave_active_dofs)
            mSlaveActiveIndices.resize (n_slave_active_dofs,false);
        if (mLMInactiveIndices.size() != n_lm_inactive_dofs)
            mLMInactiveIndices.resize (n_lm_inactive_dofs,false);
        if (mLMActiveIndices.size() != n_lm_active_dofs)
            mLMActiveIndices.resize (n_lm_active_dofs,false);

        // "Other" DoFs are everything not classified above (non-interface displacements, etc.)
        const SizeType n_other_dofs = tot_active_dofs - n_lm_inactive_dofs - n_lm_active_dofs - n_master_dofs - n_slave_inactive_dofs - n_slave_active_dofs;
        if (mOtherIndices.size() != n_other_dofs)
            mOtherIndices.resize (n_other_dofs, false);
        if (mGlobalToLocalIndexing.size() != tot_active_dofs)
            mGlobalToLocalIndexing.resize (tot_active_dofs,false);
        if (mWhichBlockType.size() != tot_active_dofs)
            mWhichBlockType.resize(tot_active_dofs, false);

        // Size check: each active slave displacement DoF must be paired with one active LM DoF
        KRATOS_ERROR_IF_NOT(n_lm_active_dofs == n_slave_active_dofs) << "The number of active LM dofs: " << n_lm_active_dofs << " and active slave nodes dofs: " << n_slave_active_dofs << " does not coincide" << std::endl;

        /**
         * Second pass: construct the auxiliary index lists.
         * m<Block>Indices[k] holds the global row of the k-th DoF of that block,
         * mGlobalToLocalIndexing[i] holds the position of global row i inside its block,
         * and mWhichBlockType[i] tags which block global row i belongs to.
         */
        SizeType lm_inactive_counter = 0, lm_active_counter = 0;
        SizeType master_counter = 0;
        SizeType slave_inactive_counter = 0, slave_active_counter = 0;
        SizeType other_counter = 0;
        IndexType global_pos = 0;

        // We separate if we consider a block builder and solver or an elimination builder and solver
        if (rModelPart.IsNot(TO_SPLIT)) {
            // In case of block builder and solver
            for (auto& i_dof : rDofSet) {
                node_id = i_dof.Id();
                const NodeType& r_node = rModelPart.GetNode(node_id);
                if (i_dof.EquationId() < rA.size1()) {
                    if (IsLMDof(i_dof)) {
                        if (r_node.Is(ACTIVE)) {
                            mLMActiveIndices[lm_active_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = lm_active_counter;
                            mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
                            ++lm_active_counter;
                        } else {
                            mLMInactiveIndices[lm_inactive_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
                            mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
                            ++lm_inactive_counter;
                        }
                    } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
                        if (r_node.Is(MASTER)) {
                            mMasterIndices[master_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = master_counter;
                            mWhichBlockType[global_pos] = BlockType::MASTER;
                            ++master_counter;
                        } else if (r_node.Is(SLAVE)) {
                            if (r_node.Is(ACTIVE)) {
                                mSlaveActiveIndices[slave_active_counter] = global_pos;
                                mGlobalToLocalIndexing[global_pos] = slave_active_counter;
                                mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
                                ++slave_active_counter;
                            } else {
                                mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
                                mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
                                mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
                                ++slave_inactive_counter;
                            }
                        } else { // We need to consider always an else to ensure that the system size is consistent
                            mOtherIndices[other_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = other_counter;
                            mWhichBlockType[global_pos] = BlockType::OTHER;
                            ++other_counter;
                        }
                    } else {
                        mOtherIndices[other_counter] = global_pos;
                        mGlobalToLocalIndexing[global_pos] = other_counter;
                        mWhichBlockType[global_pos] = BlockType::OTHER;
                        ++other_counter;
                    }
                    ++global_pos;
                }
            }
        } else {
            // In case of elimination builder and solver
            for (auto& i_dof : rDofSet) {
                node_id = i_dof.Id();
                const NodeType& r_node = rModelPart.GetNode(node_id);
                if (IsLMDof(i_dof)) {
                    if (r_node.Is(ACTIVE)) {
                        mLMActiveIndices[lm_active_counter] = global_pos;
                        mGlobalToLocalIndexing[global_pos] = lm_active_counter;
                        mWhichBlockType[global_pos] = BlockType::LM_ACTIVE;
                        ++lm_active_counter;
                    } else {
                        mLMInactiveIndices[lm_inactive_counter] = global_pos;
                        mGlobalToLocalIndexing[global_pos] = lm_inactive_counter;
                        mWhichBlockType[global_pos] = BlockType::LM_INACTIVE;
                        ++lm_inactive_counter;
                    }
                } else if ( r_node.Is(INTERFACE) && IsDisplacementDof(i_dof)) {
                    if (r_node.Is(MASTER)) {
                        mMasterIndices[master_counter] = global_pos;
                        mGlobalToLocalIndexing[global_pos] = master_counter;
                        mWhichBlockType[global_pos] = BlockType::MASTER;
                        ++master_counter;
                    } else if (r_node.Is(SLAVE)) {
                        if (r_node.Is(ACTIVE)) {
                            mSlaveActiveIndices[slave_active_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = slave_active_counter;
                            mWhichBlockType[global_pos] = BlockType::SLAVE_ACTIVE;
                            ++slave_active_counter;
                        } else {
                            mSlaveInactiveIndices[slave_inactive_counter] = global_pos;
                            mGlobalToLocalIndexing[global_pos] = slave_inactive_counter;
                            mWhichBlockType[global_pos] = BlockType::SLAVE_INACTIVE;
                            ++slave_inactive_counter;
                        }
                    } else { // We need to consider always an else to ensure that the system size is consistent
                        mOtherIndices[other_counter] = global_pos;
                        mGlobalToLocalIndexing[global_pos] = other_counter;
                        mWhichBlockType[global_pos] = BlockType::OTHER;
                        ++other_counter;
                    }
                } else {
                    mOtherIndices[other_counter] = global_pos;
                    mGlobalToLocalIndexing[global_pos] = other_counter;
                    mWhichBlockType[global_pos] = BlockType::OTHER;
                    ++other_counter;
                }
                ++global_pos;
            }
        }

        // Sanity checks (debug builds only): every counter must equal the first-pass count
        KRATOS_DEBUG_ERROR_IF(master_counter != n_master_dofs) << "The number of active slave dofs counter : " << master_counter << "is higher than the expected: " << n_master_dofs << std::endl;
        KRATOS_DEBUG_ERROR_IF(slave_active_counter != n_slave_active_dofs) << "The number of active slave dofs counter : " << slave_active_counter << "is higher than the expected: " << n_slave_active_dofs << std::endl;
        KRATOS_DEBUG_ERROR_IF(slave_inactive_counter != n_slave_inactive_dofs) << "The number of inactive slave dofs counter : " << slave_inactive_counter << "is higher than the expected: " << n_slave_inactive_dofs << std::endl;
        KRATOS_DEBUG_ERROR_IF(lm_active_counter != n_lm_active_dofs) << "The number of active LM dofs counter : " << lm_active_counter << "is higher than the expected: " << n_lm_active_dofs << std::endl;
        KRATOS_DEBUG_ERROR_IF(lm_inactive_counter != n_lm_inactive_dofs) << "The number of inactive LM dofs counter : " << lm_inactive_counter << "is higher than the expected: " << n_lm_inactive_dofs << std::endl;
        KRATOS_DEBUG_ERROR_IF(other_counter != n_other_dofs) << "The number of other dofs counter : " << other_counter << "is higher than the expected: " << n_other_dofs << std::endl;
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "Mixed displacement LM linear solver";
    }

    /// Print information about this object.
    void PrintInfo (std::ostream& rOStream) const override
    {
        rOStream << "Mixed displacement LM linear solver";
    }

    /// Print object's data.
    void PrintData (std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This function generates the subblocks of matrix A
     * @details as A = ( KNN  KNM    KNSI    KNSA     0        0    ) u
     *                 ( KMN  KMM    KMSI    KMSA    -MI^T    -MA^T ) u_master
     *                 ( KSIN KSIM   KSISI   KSISA   DII^T    DIA^T ) u_slave_inactive
     *                 ( KSAN KSAM   KSASI   KSASA   DAI^T    DAA^T ) u_slave_active
     *                 (  0    0      0       0      ALMI        0  ) LMInactive
     *                 (  0   KLMAM  KLMASI  KLMASA   0     KLMALMA ) LMActive
     * We will call as A = ( KNN  KNM   KNSI    KNSA     0      0      ) u
     *                     ( KMN  KMM   KMSI    KMSA   KMLMI   KMLMA   ) u_master
     *                     ( KSIN KSIM  KSISI   KSISA  KSILMI  KSILMA  ) u_slave_inactive
     *                     ( KSAN KSAM  KSASI   KSASA  KSALMI  KSALMA  ) u_slave_active
     *                     (  0    0     0       0     KLMILMI    0    ) LMInactive
     *                     (  0  KLMAM KLMASI  KLMASA     0   KLMALMA  ) LMActive
     * Subblocks are allocated or not depending on the value of "NeedAllocation"
     * @param rA System matrix
     * @param rX Solution vector. It's also the initial guess for iterative linear solvers.
     * @param rB Right hand side vector.
*/
    void FillBlockMatrices (
        const bool NeedAllocation,
        SparseMatrixType& rA,
        VectorType& rX,
        VectorType& rB
        )
    {
        KRATOS_TRY

        // NOTE(review): rX and rB are not read in this method — kept for interface symmetry

        // Auxiliar sizes
        const SizeType other_dof_size = mOtherIndices.size();
        const SizeType master_size = mMasterIndices.size();
        const SizeType slave_inactive_size = mSlaveInactiveIndices.size();
        const SizeType slave_active_size = mSlaveActiveIndices.size();
        const SizeType lm_active_size = mLMActiveIndices.size();
        const SizeType lm_inactive_size = mLMInactiveIndices.size();

        if (NeedAllocation)
            AllocateBlocks();

        // Get access to A data (CSR layout: row pointers, column indices, values)
        const IndexType* index1 = rA.index1_data().begin();
        const IndexType* index2 = rA.index2_data().begin();
        const double* values = rA.value_data().begin();

        // Allocate the auxiliar blocks by push_back
        SparseMatrixType KMLMA(master_size, lm_active_size);          /// The master-active LM block (this is the big block of M)
        SparseMatrixType KLMALMA(lm_active_size, lm_active_size);     /// The active LM-active LM block
        SparseMatrixType KSALMA(slave_active_size, lm_active_size);   /// The active slave-active LM block (this is the big block of D, diagonal)
        SparseMatrixType KLMILMI(lm_inactive_size, lm_inactive_size); /// The inactive LM-inactive LM block (diagonal)

        // Raw CSR row-pointer arrays for each sub-block (pass 1 counts, pass 2 fills)
        IndexType* KMLMA_ptr = new IndexType[master_size + 1];
        IndexType* mKSAN_ptr = new IndexType[slave_active_size + 1];
        IndexType* mKSAM_ptr = new IndexType[slave_active_size + 1];
        IndexType* mKSASI_ptr = new IndexType[slave_active_size + 1];
        IndexType* mKSASA_ptr = new IndexType[slave_active_size + 1];
        IndexType* KSALMA_ptr = new IndexType[slave_active_size + 1];
        IndexType* KLMILMI_ptr = new IndexType[lm_inactive_size + 1];
        IndexType* KLMALMA_ptr = new IndexType[lm_active_size + 1];

        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(master_size + 1); i++)
            KMLMA_ptr[i] = 0;
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(slave_active_size + 1); i++) {
            mKSAN_ptr[i] = 0;
            mKSAM_ptr[i] = 0;
            mKSASI_ptr[i] = 0;
            mKSASA_ptr[i] = 0;
            KSALMA_ptr[i] = 0;
        }
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(lm_inactive_size + 1); i++)
            KLMILMI_ptr[i] = 0;
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(lm_active_size + 1); i++)
            KLMALMA_ptr[i] = 0;

        // Pass 1: count the nonzeros of each sub-block row
        // (each thread writes ptr[local_row_id + 1] of a distinct row — no races)
        #pragma omp parallel
        {
            // We iterate over original matrix
            #pragma omp for
            for (int i=0; i<static_cast<int>(rA.size1()); i++) {
                const IndexType row_begin = index1[i];
                const IndexType row_end   = index1[i+1];
                const IndexType local_row_id = mGlobalToLocalIndexing[i];

                IndexType KMLMA_cols = 0;
                IndexType mKSAN_cols = 0;
                IndexType mKSAM_cols = 0;
                IndexType mKSASI_cols = 0;
                IndexType mKSASA_cols = 0;
                IndexType KSALMA_cols = 0;
                IndexType KLMILMI_cols = 0;
                IndexType KLMALMA_cols = 0;

                if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
                            ++KMLMA_cols;
                        }
                    }
                    KRATOS_DEBUG_ERROR_IF(local_row_id > master_size) << "MASTER:: Local row ID: " << local_row_id <<" is greater than the number of rows " << master_size << std::endl;
                    KMLMA_ptr[local_row_id + 1] = KMLMA_cols;
                } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
                            ++mKSAN_cols;
                        } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
                            ++mKSAM_cols;
                        } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
                            ++mKSASI_cols;
                        } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
                            ++mKSASA_cols;
                        } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
                            ++KSALMA_cols;
                        }
                    }
                    KRATOS_DEBUG_ERROR_IF(local_row_id > slave_active_size) << "SLAVE_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << slave_active_size << std::endl;
                    mKSAN_ptr[local_row_id + 1]  = mKSAN_cols;
                    mKSAM_ptr[local_row_id + 1]  = mKSAM_cols;
                    mKSASI_ptr[local_row_id + 1] = mKSASI_cols;
                    mKSASA_ptr[local_row_id + 1] = mKSASA_cols;
                    KSALMA_ptr[local_row_id + 1] = KSALMA_cols;
                } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
                            ++KLMILMI_cols;
                        }
                    }
                    KRATOS_DEBUG_ERROR_IF(local_row_id > lm_inactive_size) << "LM_INACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_inactive_size << std::endl;
                    KLMILMI_ptr[local_row_id + 1] = KLMILMI_cols;
                } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
                            ++KLMALMA_cols;
                        }
                    }
                    KRATOS_DEBUG_ERROR_IF(local_row_id > lm_active_size) << "LM_ACTIVE:: Local row ID: " << local_row_id <<" is greater than the number of rows " << lm_active_size << std::endl;
                    KLMALMA_ptr[local_row_id + 1] = KLMALMA_cols;
                }
            }
        }

        // We initialize the blocks sparse matrix:
        // prefix-sum the per-row counts into CSR row pointers, then allocate column/value arrays
        std::partial_sum(KMLMA_ptr, KMLMA_ptr + master_size + 1, KMLMA_ptr);
        const std::size_t KMLMA_nonzero_values = KMLMA_ptr[master_size];
        IndexType* aux_index2_KMLMA= new IndexType[KMLMA_nonzero_values];
        double* aux_val_KMLMA= new double[KMLMA_nonzero_values];

        std::partial_sum(mKSAN_ptr, mKSAN_ptr + slave_active_size + 1, mKSAN_ptr);
        const std::size_t mKSAN_nonzero_values = mKSAN_ptr[slave_active_size];
        IndexType* aux_index2_mKSAN= new IndexType[mKSAN_nonzero_values];
        double* aux_val_mKSAN= new double[mKSAN_nonzero_values];

        std::partial_sum(mKSAM_ptr, mKSAM_ptr + slave_active_size + 1, mKSAM_ptr);
        const std::size_t mKSAM_nonzero_values = mKSAM_ptr[slave_active_size];
        IndexType* aux_index2_mKSAM= new IndexType[mKSAM_nonzero_values];
        double* aux_val_mKSAM= new double[mKSAM_nonzero_values];

        std::partial_sum(mKSASI_ptr, mKSASI_ptr + slave_active_size + 1, mKSASI_ptr);
        const std::size_t mKSASI_nonzero_values = mKSASI_ptr[slave_active_size];
        IndexType* aux_index2_mKSASI= new IndexType[mKSASI_nonzero_values];
        double* aux_val_mKSASI= new double[mKSASI_nonzero_values];

        std::partial_sum(mKSASA_ptr, mKSASA_ptr + slave_active_size + 1, mKSASA_ptr);
        const std::size_t mKSASA_nonzero_values = mKSASA_ptr[slave_active_size];
        IndexType* aux_index2_mKSASA= new IndexType[mKSASA_nonzero_values];
        double* aux_val_mKSASA = new double[mKSASA_nonzero_values];

        std::partial_sum(KSALMA_ptr, KSALMA_ptr + slave_active_size + 1, KSALMA_ptr);
        const std::size_t KSALMA_nonzero_values = KSALMA_ptr[slave_active_size];
        IndexType* aux_index2_KSALMA= new IndexType[KSALMA_nonzero_values];
        double* aux_val_KSALMA = new double[KSALMA_nonzero_values];

        std::partial_sum(KLMILMI_ptr, KLMILMI_ptr + lm_inactive_size + 1, KLMILMI_ptr);
        const std::size_t KLMILMI_nonzero_values = KLMILMI_ptr[lm_inactive_size];
        IndexType* aux_index2_KLMILMI= new IndexType[KLMILMI_nonzero_values];
        double* aux_val_KLMILMI = new double[KLMILMI_nonzero_values];

        std::partial_sum(KLMALMA_ptr, KLMALMA_ptr + lm_active_size + 1, KLMALMA_ptr);
        const std::size_t KLMALMA_nonzero_values = KLMALMA_ptr[lm_active_size];
        IndexType* aux_index2_KLMALMA = new IndexType[KLMALMA_nonzero_values];
        double* aux_val_KLMALMA = new double[KLMALMA_nonzero_values];

        // Pass 2: fill column indices and values of each sub-block
        #pragma omp parallel
        {
            // We iterate over original matrix
            #pragma omp for
            for (int i=0; i<static_cast<int>(rA.size1()); i++) {
                const IndexType row_begin = index1[i];
                const IndexType row_end   = index1[i+1];
                const IndexType local_row_id = mGlobalToLocalIndexing[i];

                if ( mWhichBlockType[i] == BlockType::MASTER) { // KMLMA
                    IndexType KMLMA_row_beg = KMLMA_ptr[local_row_id];
                    IndexType KMLMA_row_end = KMLMA_row_beg;
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KMLMA block
                            const double value = values[j];
                            const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
                            aux_index2_KMLMA[KMLMA_row_end] = local_col_id;
                            aux_val_KMLMA[KMLMA_row_end] = value;
                            ++KMLMA_row_end;
                        }
                    }
                } else if ( mWhichBlockType[i] == BlockType::SLAVE_ACTIVE) { //either KSAN or KSAM or KSASA or KSASA or KSALM
                    IndexType mKSAN_row_beg = mKSAN_ptr[local_row_id];
                    IndexType mKSAN_row_end = mKSAN_row_beg;
                    IndexType mKSAM_row_beg = mKSAM_ptr[local_row_id];
                    IndexType mKSAM_row_end = mKSAM_row_beg;
                    IndexType mKSASI_row_beg = mKSASI_ptr[local_row_id];
                    IndexType mKSASI_row_end = mKSASI_row_beg;
                    IndexType mKSASA_row_beg = mKSASA_ptr[local_row_id];
                    IndexType mKSASA_row_end = mKSASA_row_beg;
                    IndexType KSALMA_row_beg = KSALMA_ptr[local_row_id];
                    IndexType KSALMA_row_end = KSALMA_row_beg;
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        const double value = values[j];
                        const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
                        if (mWhichBlockType[col_index] == BlockType::OTHER) { // KSAN block
                            aux_index2_mKSAN[mKSAN_row_end] = local_col_id;
                            aux_val_mKSAN[mKSAN_row_end] = value;
                            ++mKSAN_row_end;
                        } else if (mWhichBlockType[col_index] == BlockType::MASTER) { // KSAM block
                            aux_index2_mKSAM[mKSAM_row_end] = local_col_id;
                            aux_val_mKSAM[mKSAM_row_end] = value;
                            ++mKSAM_row_end;
                        } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { // KSASI block
                            aux_index2_mKSASI[mKSASI_row_end] = local_col_id;
                            aux_val_mKSASI[mKSASI_row_end] = value;
                            ++mKSASI_row_end;
                        } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { // KSASA block
                            aux_index2_mKSASA[mKSASA_row_end] = local_col_id;
                            aux_val_mKSASA[mKSASA_row_end] = value;
                            ++mKSASA_row_end;
                        } else if ( mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KSALMA block (diagonal)
                            aux_index2_KSALMA[KSALMA_row_end] = local_col_id;
                            aux_val_KSALMA[KSALMA_row_end] = value;
                            ++KSALMA_row_end;
                        }
                    }
                } else if ( mWhichBlockType[i] == BlockType::LM_INACTIVE) { // KLMILMI
                    IndexType KLMILMI_row_beg = KLMILMI_ptr[local_row_id];
                    IndexType KLMILMI_row_end = KLMILMI_row_beg;
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if (mWhichBlockType[col_index] == BlockType::LM_INACTIVE) { // KLMILMI block (diagonal)
                            const double value = values[j];
                            const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
                            aux_index2_KLMILMI[KLMILMI_row_end] = local_col_id;
                            aux_val_KLMILMI[KLMILMI_row_end] = value;
                            ++KLMILMI_row_end;
                        }
                    }
                } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { // KLMALMA
                    IndexType KLMALMA_row_beg = KLMALMA_ptr[local_row_id];
                    IndexType KLMALMA_row_end = KLMALMA_row_beg;
                    for (IndexType j=row_begin; j<row_end; j++) {
                        const IndexType col_index = index2[j];
                        if (mWhichBlockType[col_index] == BlockType::LM_ACTIVE) { // KLMALMA block
                            const double value = values[j];
                            const IndexType local_col_id = mGlobalToLocalIndexing[col_index];
                            aux_index2_KLMALMA[KLMALMA_row_end] = local_col_id;
                            aux_val_KLMALMA[KLMALMA_row_end] = value;
                            ++KLMALMA_row_end;
                        }
                    }
                }
            }
        }

        // Build the block matrices from the assembled CSR arrays
        // NOTE(review): CreateMatrix is defined elsewhere in this file; presumably it
        // takes ownership of (or releases) the raw arrays — confirm against its definition
        CreateMatrix(KMLMA, master_size, lm_active_size, KMLMA_ptr, aux_index2_KMLMA, aux_val_KMLMA);
        CreateMatrix(mKSAN, slave_active_size, other_dof_size, mKSAN_ptr, aux_index2_mKSAN, aux_val_mKSAN);
        CreateMatrix(mKSAM, slave_active_size, master_size, mKSAM_ptr, aux_index2_mKSAM, aux_val_mKSAM);
        CreateMatrix(mKSASI, slave_active_size, slave_inactive_size, mKSASI_ptr, aux_index2_mKSASI, aux_val_mKSASI);
        CreateMatrix(mKSASA, slave_active_size, slave_active_size, mKSASA_ptr, aux_index2_mKSASA, aux_val_mKSASA);
        CreateMatrix(KSALMA, slave_active_size, lm_active_size, KSALMA_ptr, aux_index2_KSALMA, aux_val_KSALMA);
        CreateMatrix(KLMILMI, lm_inactive_size, lm_inactive_size, KLMILMI_ptr, aux_index2_KLMILMI, aux_val_KLMILMI);
        CreateMatrix(KLMALMA, lm_active_size, lm_active_size, KLMALMA_ptr, aux_index2_KLMALMA, aux_val_KLMALMA);

        // We compute directly the inverse of the KSALMA matrix
        // KSALMA it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
        // (NOTE: technically it is not a stiffness matrix, we give that name)
        if (lm_active_size > 0) {
            ComputeDiagonalByLumping(KSALMA, mKLMAModified, ZeroTolerance);
        }

        // We compute directly the inverse of the KLMILMI matrix
        // KLMILMI it is supposed to be a diagonal matrix (in fact it is the key point of this formulation)
        // (NOTE: technically it is not a stiffness matrix, we give that name)
        if (lm_inactive_size > 0) {
            ComputeDiagonalByLumping(KLMILMI, mKLMIModified, ZeroTolerance);
        }

        // Compute the P and C operators
        if (slave_active_size > 0) {
            SparseMatrixMultiplicationUtility::MatrixMultiplication(KMLMA, mKLMAModified, mPOperator);
            SparseMatrixMultiplicationUtility::MatrixMultiplication(KLMALMA, mKLMAModified, mCOperator);
        }

        // We proceed with the auxiliar products for the master blocks
        SparseMatrixType master_auxKSAN(master_size, other_dof_size);
        SparseMatrixType master_auxKSAM(master_size, master_size);
        SparseMatrixType master_auxKSASI(master_size, slave_inactive_size);
        SparseMatrixType master_auxKSASA(master_size, slave_active_size);

        if (slave_active_size > 0) {
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAN, master_auxKSAN);
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSAM, master_auxKSAM);
            if (slave_inactive_size > 0)
                SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASI, master_auxKSASI);
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mPOperator, mKSASA, master_auxKSASA);
        }

        // We proceed with the auxiliar products for the active slave blocks
        SparseMatrixType aslave_auxKSAN(slave_active_size, other_dof_size);
        SparseMatrixType aslave_auxKSAM(slave_active_size, master_size);
        SparseMatrixType aslave_auxKSASI(slave_active_size, slave_inactive_size);
        SparseMatrixType aslave_auxKSASA(slave_active_size, slave_active_size);

        if (slave_active_size > 0) {
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAN, aslave_auxKSAN);
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSAM, aslave_auxKSAM);
            if (slave_inactive_size > 0)
                SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASI, aslave_auxKSASI);
            SparseMatrixMultiplicationUtility::MatrixMultiplication(mCOperator, mKSASA, aslave_auxKSASA);
        }

        // Auxiliar indexes: block offsets inside the condensed displacement system
        const SizeType other_dof_initial_index = 0;
        const SizeType master_dof_initial_index = other_dof_size;
        const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size;
        const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size;

        // The auxiliar index structure
        const SizeType nrows = mKDispModified.size1();
        const SizeType ncols = mKDispModified.size2();
        IndexType* K_disp_modified_ptr_aux1 = new IndexType[nrows + 1];
        K_disp_modified_ptr_aux1[0] = 0;

        // Pass 1 for the condensed matrix: count nonzeros per row
        #pragma omp parallel
        {
            #pragma omp for
            for (int i=0; i<static_cast<int>(rA.size1()); i++) {
                if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
                    ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1);
                } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
                    ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1);
                } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
                    ComputeNonZeroColumnsDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1);
                } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
                    ComputeNonZeroColumnsPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1);
                }
            }
        }

        // We initialize the final sparse matrix
        std::partial_sum(K_disp_modified_ptr_aux1, K_disp_modified_ptr_aux1 + nrows + 1, K_disp_modified_ptr_aux1);
        const SizeType nonzero_values_aux1 = K_disp_modified_ptr_aux1[nrows];
        IndexType* aux_index2_K_disp_modified_aux1 = new IndexType[nonzero_values_aux1];
        double* aux_val_K_disp_modified_aux1 = new double[nonzero_values_aux1];

        // Pass 2 for the condensed matrix: fill column indices and values
        #pragma omp parallel
        {
            #pragma omp for
            for (int i=0; i<static_cast<int>(rA.size1()); i++) {
                if ( mWhichBlockType[i] == BlockType::OTHER) { //either KNN or KNM or KNSI or KNSA
                    ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, other_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
                } else if ( mWhichBlockType[i] == BlockType::MASTER) { //either KMN or KMM or KMSI or KMLM
                    ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, master_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
                } else if ( mWhichBlockType[i] == BlockType::SLAVE_INACTIVE) { //either KSIN or KSIM or KSISI or KSISA
                    ComputeAuxiliarValuesDispDoFs( index1, index2, values, i, slave_inactive_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
                } else if ( mWhichBlockType[i] == BlockType::LM_ACTIVE) { //either KLMAM or KLMASI or KLMASA
                    ComputeAuxiliarValuesPartialDispDoFs( index1, index2, values, i, assembling_slave_dof_initial_index, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);
                }
            }
        }

        // Create the first auxiliar matrix
        CreateMatrix(mKDispModified, nrows, ncols, K_disp_modified_ptr_aux1, aux_index2_K_disp_modified_aux1, aux_val_K_disp_modified_aux1);

        // Now we create the second matrix block to sum
        IndexType* K_disp_modified_ptr_aux2 = new IndexType[nrows + 1];
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(nrows + 1); i++)
            K_disp_modified_ptr_aux2[i] = 0;

        // Pass 1 for the correction matrix: count nonzeros contributed by the operator products
        #pragma omp parallel
        {
            #pragma omp for
            for (int i=0; i<static_cast<int>(master_size); i++) {
                IndexType K_disp_modified_cols_aux2 = 0;
                // Get access to master_auxKSAN data
                if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAN, i, K_disp_modified_cols_aux2);
                }

                // Get access to master_auxKSAM data
                if (master_auxKSAM.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSAM, i, K_disp_modified_cols_aux2);
                }

                // Get access to master_auxKSASI data
                if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASI, i, K_disp_modified_cols_aux2);
                }

                // Get access to master_auxKSASA data
                if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(master_auxKSASA, i, K_disp_modified_cols_aux2);
                }

                K_disp_modified_ptr_aux2[master_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
            }

            #pragma omp for
            for (int i=0; i<static_cast<int>(slave_active_size); i++) {
                IndexType K_disp_modified_cols_aux2 = 0;
                // Get access to aslave_auxKSAN data
                if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAN, i, K_disp_modified_cols_aux2);
                }

                // Get access to aslave_auxKSAM data
                if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSAM, i, K_disp_modified_cols_aux2);
                }

                // Get access to aslave_auxKSASI data
                if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASI, i, K_disp_modified_cols_aux2);
                }

                // Get access to aslave_auxKSASA data
                if (aslave_auxKSASA.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeNonZeroBlocks(aslave_auxKSASA, i, K_disp_modified_cols_aux2);
                }

                K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i + 1] = K_disp_modified_cols_aux2;
            }
        }

        // We initialize the final sparse matrix
        std::partial_sum(K_disp_modified_ptr_aux2, K_disp_modified_ptr_aux2 + nrows + 1, K_disp_modified_ptr_aux2);
        const SizeType nonzero_values_aux2 = K_disp_modified_ptr_aux2[nrows];
        IndexType* aux_index2_K_disp_modified_aux2 = new IndexType[nonzero_values_aux2];
        double* aux_val_K_disp_modified_aux2 = new double[nonzero_values_aux2];

        // Pass 2 for the correction matrix: fill column indices and values
        #pragma omp parallel
        {
            #pragma omp for
            for (int i=0; i<static_cast<int>(master_size); i++) {
                const IndexType row_beg = K_disp_modified_ptr_aux2[master_dof_initial_index + i];
                IndexType row_end = row_beg;

                // Get access to master_auxKSAN data
                if (master_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
                }

                // Get access to master_auxKSAM data
                if (master_auxKSAM.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
                }

                // Get access to master_auxKSASI data
                if (master_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
                }

                // Get access to master_auxKSASA data
                if (master_auxKSASA.nnz() > 0 && slave_active_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(master_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
                }
            }

            #pragma omp for
            for (int i=0; i<static_cast<int>(slave_active_size); i++) {
                const IndexType row_beg = K_disp_modified_ptr_aux2[assembling_slave_dof_initial_index + i];
                IndexType row_end = row_beg;

                // Get access to aslave_auxKSAN data
                if (aslave_auxKSAN.nnz() > 0 && other_dof_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAN, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, other_dof_initial_index);
                }

                // Get access to aslave_auxKSAM data
                if (aslave_auxKSAM.nnz() > 0 && master_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSAM, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, master_dof_initial_index);
                }

                // Get access to aslave_auxKSASI data
                if (aslave_auxKSASI.nnz() > 0 && slave_inactive_size > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASI, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, slave_inactive_dof_initial_index);
                }

                // Get access to aslave_auxKSASA data
                if (aslave_auxKSASA.nnz() > 0) {
                    SparseMatrixMultiplicationUtility::ComputeAuxiliarValuesBlocks(aslave_auxKSASA, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2, i, row_end, assembling_slave_dof_initial_index);
                }
            }
        }

        // Create the second auxiliar matrix
        SparseMatrixType K_disp_modified_aux2(nrows, ncols);
        CreateMatrix(K_disp_modified_aux2, nrows, ncols, K_disp_modified_ptr_aux2, aux_index2_K_disp_modified_aux2, aux_val_K_disp_modified_aux2);

        // We sum the auxiliar matrices (condensation: subtract the operator-product corrections)
        SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(mKDispModified, K_disp_modified_aux2, - 1.0);

        // Finally we ensure that the matrix is structurally symmetric
        EnsureStructuralSymmetryMatrix(mKDispModified);

    #ifdef KRATOS_DEBUG
        CheckMatrix(mKDispModified);
    #endif

        // // DEBUG
        // LOG_MATRIX_PRETTY(rA)
        // LOG_MATRIX_PRETTY(mKDispModified)

        KRATOS_CATCH ("")
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    LinearSolverPointerType mpSolverDispBlock; /// The pointer to the displacement linear solver

    Flags mOptions; /// This stores the flags

    IndexVectorType mMasterIndices;         /// The vector storing the indices of the master nodes in contact
    IndexVectorType mSlaveInactiveIndices;  /// The vector storing the indices of the slave nodes in contact (Inactive)
    IndexVectorType mSlaveActiveIndices;    /// The vector storing the indices of the slave nodes in contact (Active)
    IndexVectorType mLMInactiveIndices;     /// The vector storing the indices of the LM (Inactive)
    IndexVectorType mLMActiveIndices;       /// The vector storing the indices of the LM (Active)
    IndexVectorType mOtherIndices;          /// The vector containing the indices for other DoF
    IndexVectorType mGlobalToLocalIndexing; /// This vector stores the correspondance between the local and global
    BlockTypeVectorType mWhichBlockType;    /// This vector stores the LM block belongings

    SparseMatrixType mKDispModified; /// The modified displacement block
    SparseMatrixType mKLMAModified;  /// The modified active LM block (inverted diagonal)
    SparseMatrixType mKLMIModified;  /// The modified inactive LM block (inverted diagonal)

    SparseMatrixType mKSAN;  /// The slave active-displacement block
    SparseMatrixType mKSAM;  /// The active slave-master block
    SparseMatrixType mKSASI; /// The active slave-inactive slave block
    SparseMatrixType mKSASA; /// The inactive slave-active slave block

    SparseMatrixType mPOperator; /// The operator used for the master blocks
    SparseMatrixType mCOperator; /// The operator used for the active slave block

    VectorType mResidualLMActive;   /// The residual of the active lagrange multipliers
    VectorType mResidualLMInactive; /// The residual of the inactive lagrange multipliers
    VectorType mResidualDisp;       /// The residual of the rest of displacements

    VectorType mLMActive;   /// The solution of the active lagrange multiplies
    VectorType mLMInactive; /// The solution of the inactive lagrange multiplies
    VectorType mDisp;       /// The solution of the rest of displacements

    IndexType mEchoLevel = 0;   /// The echo level of the solver
    IndexType mFileCreated = 0; /// The index used to identify the file created

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief This method is meant to avoid code duplication when computing the non zero terms in the Aux1 matrix
     * @param Index1 The indexes of nonzero rows
     * @param Index2 The indexes of nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The nonzero terms of each column
     */
    inline void ComputeNonZeroColumnsDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr
        )
    {
        const IndexType row_begin = Index1[CurrentRow];
        const IndexType row_end = Index1[CurrentRow + 1];

        IndexType cols = 0;

        // Only displacement-type columns (OTHER/MASTER/SLAVE_*) are counted; LM columns are dropped
        const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex;
        for (IndexType j=row_begin; j<row_end; j++) {
            const IndexType col_index = Index2[j];
            if (mWhichBlockType[col_index] == BlockType::OTHER) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::MASTER) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) {
                ++cols;
            } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) {
                ++cols;
            }
        }
        Ptr[local_row_id + 1] = cols;
    }

    /**
     * @brief This method is meant to avoid code duplication when computing the non zero terms in the Aux1 matrix
     * @details The same as the previous one but not taking into account the contribution of the other dofs
     * @param Index1 The indexes of nonzero rows
     * @param Index2 The indexes of nonzero columns
     * @param Values The array containing the values of the matrix
     * @param CurrentRow The current row computed
     * @param InitialIndex The index corresponding to the current row in the global contribution
     * @param Ptr The nonzero terms of each column
     */
    inline void ComputeNonZeroColumnsPartialDispDoFs(
        const IndexType* Index1,
        const IndexType* Index2,
        const double* Values,
        const int CurrentRow,
        const IndexType InitialIndex,
        IndexType* Ptr
        )
    {
        const IndexType row_begin = Index1[CurrentRow];
        const IndexType row_end = Index1[CurrentRow + 1];

        IndexType cols = 0;

        const IndexType
local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex; for (IndexType j=row_begin; j<row_end; j++) { const IndexType col_index = Index2[j]; if (mWhichBlockType[col_index] == BlockType::MASTER) { ++cols; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { ++cols; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { ++cols; } } Ptr[local_row_id + 1] = cols; } /** * @brief This method is mean to avoid code duplication when evaluate the terms of the Aux1 matrix * @param Index1 The indexes of nonzero rows * @param Index2 The indexes of nonzero columns * @param Values The array containing the values of the matrix * @param CurrentRow The current row computed * @param InitialIndex The index corresponding to the current row in the global contribution * @param Ptr The nonzero terms of each column * @param AuxIndex2 The indexes of the non zero columns * @param AuxVals The values of the final matrix */ inline void ComputeAuxiliarValuesDispDoFs( const IndexType* Index1, const IndexType* Index2, const double* Values, const int CurrentRow, const IndexType InitialIndex, IndexType* Ptr, IndexType* AuxIndex2, double* AuxVals ) { // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); // Auxiliar indexes const SizeType other_dof_initial_index = 0; const SizeType master_dof_initial_index = other_dof_size; const SizeType slave_inactive_dof_initial_index = master_dof_initial_index + master_size; const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size; // Some indexes const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex; const IndexType row_begin_A = Index1[CurrentRow]; const IndexType row_end_A = Index1[CurrentRow + 1]; const IndexType row_beg = Ptr[local_row_id]; IndexType row_end = row_beg; for (IndexType j=row_begin_A; 
j<row_end_A; j++) { const IndexType col_index = Index2[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; const double value = Values[j]; if (mWhichBlockType[col_index] == BlockType::OTHER) { AuxIndex2[row_end] = local_col_id + other_dof_initial_index; AuxVals[row_end] = value; ++row_end; } else if (mWhichBlockType[col_index] == BlockType::MASTER) { AuxIndex2[row_end] = local_col_id + master_dof_initial_index; AuxVals[row_end] = value; ++row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index; AuxVals[row_end] = value; ++row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index; AuxVals[row_end] = value; ++row_end; } } } /** * @brief This method is mean to avoid code duplication when evaluate the terms of the Aux1 matrix * @details The same as the previous one but not taking into account the contribution of the other dofs * @param Index1 The indexes of nonzero rows * @param Index2 The indexes of nonzero columns * @param Values The array containing the values of the matrix * @param CurrentRow The current row computed * @param InitialIndex The index corresponding to the current row in the global contribution * @param Ptr The nonzero terms of each column * @param AuxIndex2 The indexes of the non zero columns * @param AuxVals The values of the final matrix */ inline void ComputeAuxiliarValuesPartialDispDoFs( const IndexType* Index1, const IndexType* Index2, const double* Values, const int CurrentRow, const IndexType InitialIndex, IndexType* Ptr, IndexType* AuxIndex2, double* AuxVals ) { // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); // Auxiliar indexes const SizeType master_dof_initial_index = other_dof_size; const SizeType 
slave_inactive_dof_initial_index = master_dof_initial_index + master_size; const SizeType assembling_slave_dof_initial_index = slave_inactive_dof_initial_index + slave_inactive_size; // Some indexes const IndexType local_row_id = mGlobalToLocalIndexing[CurrentRow] + InitialIndex; const IndexType row_begin_A = Index1[CurrentRow]; const IndexType row_end_A = Index1[CurrentRow + 1]; const IndexType row_beg = Ptr[local_row_id]; IndexType row_end = row_beg; for (IndexType j=row_begin_A; j<row_end_A; j++) { const IndexType col_index = Index2[j]; const IndexType local_col_id = mGlobalToLocalIndexing[col_index]; const double value = Values[j]; if (mWhichBlockType[col_index] == BlockType::MASTER) { AuxIndex2[row_end] = local_col_id + master_dof_initial_index; AuxVals[row_end] = value; ++row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_INACTIVE) { AuxIndex2[row_end] = local_col_id + slave_inactive_dof_initial_index; AuxVals[row_end] = value; ++row_end; } else if (mWhichBlockType[col_index] == BlockType::SLAVE_ACTIVE) { AuxIndex2[row_end] = local_col_id + assembling_slave_dof_initial_index; AuxVals[row_end] = value; ++row_end; } } } /** * @brief It allocates all the blocks and operators */ inline void AllocateBlocks() { // We clear the matrixes mKDispModified.clear(); /// The modified displacement block mKLMAModified.clear(); /// The modified active LM block (diagonal) mKLMIModified.clear(); /// The modified inaactive LM block (diagonal) mKSAN.clear(); /// The slave active-displacement block mKSAM.clear(); /// The active slave-master block mKSASI.clear(); /// The active slave-inactive slave block mKSASA.clear(); /// The active slave-slave active block mPOperator.clear(); /// The operator used for the master blocks mCOperator.clear(); /// The operator used for the active slave block mResidualLMActive.clear(); /// The residual corresponding the active LM mResidualLMInactive.clear(); /// The residual corresponding the inactive LM mResidualDisp.clear(); /// The 
residual of the displacements mLMActive.clear(); /// The solution of the active LM mLMInactive.clear(); /// The solution of the inactive LM mDisp.clear(); /// The solution of the displacement // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); const SizeType slave_active_size = mSlaveActiveIndices.size(); const SizeType lm_active_size = mLMActiveIndices.size(); const SizeType lm_inactive_size = mLMInactiveIndices.size(); const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size; // We do the allocation mKDispModified.resize(total_size, total_size, false); /// The modified displacement block mKLMAModified.resize(lm_active_size, lm_active_size, false); /// The modified active LM block (diagonal) mKLMAModified.reserve(lm_active_size); mKLMIModified.resize(lm_inactive_size, lm_inactive_size, false); /// The modified inactve LM block (diagonal) mKLMIModified.reserve(lm_inactive_size); mKSAN.resize(slave_active_size, other_dof_size, false); /// The slave active-displacement block mKSAM.resize(slave_active_size, master_size, false); /// The active slave-master block mKSASI.resize(slave_active_size, slave_inactive_size, false); /// The active slave-inactive slave block mKSASA.resize(slave_active_size, slave_active_size, false); /// The active slave-slave active block mPOperator.resize(master_size, slave_active_size, false); /// The operator used for the master blocks mCOperator.resize(lm_active_size, slave_active_size, false); /// The operator used for the active slave block mResidualLMActive.resize(lm_active_size, false ); /// The residual corresponding the active LM mResidualLMInactive.resize(lm_inactive_size, false ); /// The residual corresponding the inactive LM mResidualDisp.resize(total_size ); /// The residual of the displacements mLMActive.resize(lm_active_size, false); /// The solution 
of the active LM mLMInactive.resize(lm_inactive_size, false); /// The solution of the inactive LM mDisp.resize(total_size, false); /// The solution of the displacement } /** * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to u-dofs * @param rTotalResidual The total residual of the problem * @param ResidualU The vector containing the residual relative to the displacements */ inline void GetUPart ( const VectorType& rTotalResidual, VectorType& ResidualU ) { // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); const SizeType slave_active_size = mSlaveActiveIndices.size(); const SizeType lm_active_size = mLMActiveIndices.size(); const SizeType total_size = other_dof_size + master_size + slave_inactive_size + slave_active_size; // Resize in case the size is not correct if (ResidualU.size() != total_size ) ResidualU.resize (total_size, false); #pragma omp parallel for for (int i = 0; i<static_cast<int>(other_dof_size); i++) ResidualU[i] = rTotalResidual[mOtherIndices[i]]; // The corresponding residual for the active slave DoF's VectorType aux_res_active_slave(slave_active_size); #pragma omp parallel for for (int i = 0; i<static_cast<int>(slave_active_size); i++) aux_res_active_slave[i] = rTotalResidual[mSlaveActiveIndices[i]]; if (slave_active_size > 0) { // We compute the complementary residual for the master dofs VectorType aux_complement_master_residual(master_size); TSparseSpaceType::Mult(mPOperator, aux_res_active_slave, aux_complement_master_residual); #pragma omp parallel for for (int i = 0; i<static_cast<int>(master_size); i++) ResidualU[other_dof_size + i] = rTotalResidual[mMasterIndices[i]] - aux_complement_master_residual[i]; } else { #pragma omp parallel for for (int i = 0; i<static_cast<int>(master_size); i++) ResidualU[other_dof_size + i] = 
rTotalResidual[mMasterIndices[i]]; } #pragma omp parallel for for (int i = 0; i<static_cast<int>(slave_inactive_size); i++) ResidualU[other_dof_size + master_size + i] = rTotalResidual[mSlaveInactiveIndices[i]]; if (slave_active_size > 0) { // We compute the complementary residual for the master dofs VectorType aux_complement_active_lm_residual(lm_active_size); TSparseSpaceType::Mult(mCOperator, aux_res_active_slave, aux_complement_active_lm_residual); #pragma omp parallel for for (int i = 0; i<static_cast<int>(lm_active_size); i++) ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]] - aux_complement_active_lm_residual[i]; } else { #pragma omp parallel for for (int i = 0; i<static_cast<int>(lm_active_size); i++) ResidualU[other_dof_size + master_size + slave_inactive_size + i] = rTotalResidual[mLMActiveIndices[i]]; } } /** * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to active lm-dofs * @param rTotalResidual The total residual of the problem * @param rResidualLMA The vector containing the residual relative to the active LM */ inline void GetLMAPart( const VectorType& rTotalResidual, VectorType& rResidualLMA ) { // Auxiliar sizes const SizeType other_dof_size = mOtherIndices.size(); const SizeType master_size = mMasterIndices.size(); const SizeType slave_inactive_size = mSlaveInactiveIndices.size(); const SizeType slave_active_size = mSlaveActiveIndices.size(); // We add the other if (slave_active_size > 0) { // We get the displacement residual of the active slave nodes if (rResidualLMA.size() != slave_active_size ) rResidualLMA.resize (slave_active_size, false); #pragma omp parallel for for (int i = 0; i<static_cast<int>(rResidualLMA.size()); i++) rResidualLMA[i] = rTotalResidual[mSlaveActiveIndices[i]]; // From the computed displacements we get the components of the displacements for each block VectorType disp_N(other_dof_size); VectorType 
disp_M(master_size); VectorType disp_SI(slave_inactive_size); VectorType disp_SA(slave_active_size); #pragma omp parallel for for (int i = 0; i<static_cast<int>(other_dof_size); i++) disp_N[i] = mDisp[i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(master_size); i++) disp_M[i] = mDisp[other_dof_size + i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(slave_inactive_size); i++) disp_SI[i] = mDisp[other_dof_size + master_size + i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(slave_active_size); i++) disp_SA[i] = mDisp[other_dof_size + master_size + slave_inactive_size + i]; VectorType aux_mult(slave_active_size); TSparseSpaceType::Mult(mKSAN, disp_N, aux_mult); TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult); TSparseSpaceType::Mult(mKSAM, disp_M, aux_mult); TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult); if (slave_inactive_size > 0) { TSparseSpaceType::Mult(mKSASI, disp_SI, aux_mult); TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult); } TSparseSpaceType::Mult(mKSASA, disp_SA, aux_mult); TSparseSpaceType::UnaliasedAdd (rResidualLMA, -1.0, aux_mult); } } /** * @brief This function extracts from a vector which has the size of the overall r, the part that corresponds to inactive lm-dofs * @param rTotalResidual The total residual of the problem * @param rResidualLMI The vector containing the residual relative to the inactive LM */ inline void GetLMIPart ( const VectorType& rTotalResidual, VectorType& rResidualLMI ) { // Auxiliar size const SizeType lm_inactive_size = mLMInactiveIndices.size(); // We get the displacement residual of the active slave nodes if (rResidualLMI.size() != lm_inactive_size ) rResidualLMI.resize (lm_inactive_size, false); #pragma omp parallel for for (int i = 0; i<static_cast<int>(lm_inactive_size); i++) rResidualLMI[i] = rTotalResidual[mLMInactiveIndices[i]]; } /** * @brief This method writes the displacement part * @param rTotalResidual The total residual of 
the problem * @param ResidualU The vector containing the residual relative to the displacements */ inline void SetUPart ( VectorType& rTotalResidual, const VectorType& ResidualU ) { #pragma omp parallel for for (int i = 0; i<static_cast<int>(mOtherIndices.size()); i++) rTotalResidual[mOtherIndices[i]] = ResidualU[i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(mMasterIndices.size()); i++) rTotalResidual[mMasterIndices[i]] = ResidualU[mOtherIndices.size() + i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(mSlaveInactiveIndices.size()); i++) rTotalResidual[mSlaveInactiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + i]; #pragma omp parallel for for (int i = 0; i<static_cast<int>(mSlaveActiveIndices.size()); i++) rTotalResidual[mSlaveActiveIndices[i]] = ResidualU[mOtherIndices.size() + mMasterIndices.size() + mSlaveInactiveIndices.size() + i]; } /** * @brief This method writes the active Lagrange Multiplier part * @param rTotalResidual The total residual of the problem * @param ResidualLMA The vector containing the residual relative to the active LM */ inline void SetLMAPart ( VectorType& rTotalResidual, const VectorType& ResidualLMA ) { #pragma omp parallel for for (int i = 0; i< static_cast<int>(ResidualLMA.size()); i++) rTotalResidual[mLMActiveIndices[i]] = ResidualLMA[i]; } /** * @brief This method writes the inaactive Lagrange Multiplier part * @param rTotalResidual The total residual of the problem * @param ResidualLMI The vector containing the residual relative to the inactive LM */ inline void SetLMIPart ( VectorType& rTotalResidual, const VectorType& ResidualLMI ) { #pragma omp parallel for for (int i = 0; i< static_cast<int>(ResidualLMI.size()); i++) rTotalResidual[mLMInactiveIndices[i]] = ResidualLMI[i]; } /** * @brief This method is intended to use to ensure the matrix is structurally symmetric * @param rA The matrix to be checked */ void EnsureStructuralSymmetryMatrix (SparseMatrixType& rA) { // We 
compute the transposed matrix const SizeType size_system_1 = rA.size1(); const SizeType size_system_2 = rA.size2(); SparseMatrixType transpose(size_system_2, size_system_1); SparseMatrixMultiplicationUtility::TransposeMatrix<SparseMatrixType, SparseMatrixType>(transpose, rA, 0.0); // Finally we sum the auxiliar matrices SparseMatrixMultiplicationUtility::MatrixAdd<SparseMatrixType, SparseMatrixType>(rA, transpose, 1.0); } /** * @brief This method is intended to use to check the matrix * @param rA The matrix to be checked */ double CheckMatrix (const SparseMatrixType& rA) { // Get access to A data const std::size_t* index1 = rA.index1_data().begin(); const std::size_t* index2 = rA.index2_data().begin(); const double* values = rA.value_data().begin(); double norm = 0.0; for (std::size_t i=0; i<rA.size1(); ++i) { std::size_t row_begin = index1[i]; std::size_t row_end = index1[i+1]; if (row_end - row_begin == 0) KRATOS_WARNING("Checking sparse matrix") << "Line " << i << " has no elements" << std::endl; for (std::size_t j=row_begin; j<row_end; j++) { KRATOS_ERROR_IF( index2[j] > rA.size2() ) << "Array above size of A" << std::endl; norm += values[j]*values[j]; } } return std::sqrt (norm); } /** * @brief This method is designed to create the final solution sparse matrix from the auxiliar values * @detail Before create it reorder the columns. 
It deletes the auxiliar values after compute the matrix * @param AuxK The matrix solution * @param NRows The number of rows of the matrix * @param NCols The number of columns of the matrix * @param Ptr The indexes taht indicate the number of nonzero values in each column * @param AuxIndex2 The indexes of the nonzero columns * @param AuxVal The array containing the values of the sparse matrix */ void CreateMatrix( SparseMatrixType& AuxK, const SizeType NRows, const SizeType NCols, IndexType* Ptr, IndexType* AuxIndex2, double* AuxVal ) { // We reorder the rows SparseMatrixMultiplicationUtility::SortRows(Ptr, NRows, NCols, AuxIndex2, AuxVal); // Finally we build the final matrix SparseMatrixMultiplicationUtility::CreateSolutionMatrix(AuxK, NRows, NCols, Ptr, AuxIndex2, AuxVal); // Release memory delete[] Ptr; delete[] AuxIndex2; delete[] AuxVal; } /** * @brief This method is intended to lump an existing matrix * @param rA The matrix to be lumped * @param rdiagA The resulting matrix * @param Tolerance The tolerance considered to check if the values are almost 0 * @todo Improve the lumping in case of not pure diagonal matrix */ void ComputeDiagonalByLumping ( const SparseMatrixType& rA, SparseMatrixType& rdiagA, const double Tolerance = ZeroTolerance ) { // Aux values const std::size_t size_A = rA.size1(); // VectorType diagA_vector(size_A); // // // In case of not pure lumped matrix // if (rA.nnz() > size_A) { // // Get access to A data // const std::size_t* index1 = rA.index1_data().begin(); // const double* values = rA.value_data().begin(); // // #pragma omp parallel for // for (int i=0; i< static_cast<int>(size_A); i++) { // const std::size_t row_begin = index1[i]; // const std::size_t row_end = index1[i+1]; // double temp = 0.0; // for (std::size_t j=row_begin; j<row_end; j++) // temp += values[j]*values[j]; // // diagA_vector[i] = std::sqrt(temp); // } // } else { // Otherwise // #pragma omp parallel for // for (int i=0; i< static_cast<int>(size_A); i++) { // 
diagA_vector[i] = rA(i, i); // } // } IndexType* ptr = new IndexType[size_A + 1]; ptr[0] = 0; IndexType* aux_index2 = new IndexType[size_A]; double* aux_val = new double[size_A]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(size_A); i++) { ptr[i+1] = i+1; aux_index2[i] = i; const double value = rA(i, i); // const double value = diagA_vector[i]; if (std::abs(value) > Tolerance) aux_val[i] = 1.0/value; else // Auxiliar value aux_val[i] = 1.0; } SparseMatrixMultiplicationUtility::CreateSolutionMatrix(rdiagA, size_A, size_A, ptr, aux_index2, aux_val); delete[] ptr; delete[] aux_index2; delete[] aux_val; } /** * @brief Checks if the degree of freedom belongs to a displacement DoF * @param rDoF The degree of freedom * @return True if the DoF corresponds with a displacement dof */ static inline bool IsDisplacementDof(const DofType& rDoF) { const auto& r_variable = rDoF.GetVariable(); if (r_variable == DISPLACEMENT_X || r_variable == DISPLACEMENT_Y || r_variable == DISPLACEMENT_Z) { return true; } return false; } /** * @brief Checks if the degree of freedom belongs to a LM DoF * @param rDoF The degree of freedom * @return True if the DoF corresponds with a LM dof */ static inline bool IsLMDof(const DofType& rDoF) { const auto& r_variable = rDoF.GetVariable(); if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X || r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y || r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) { return true; } return false; } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() { Parameters default_parameters( R"( { "solver_type" : "mixed_ulm_linear_solver", "tolerance" : 1.0e-6, "max_iteration_number" : 200, "echo_level" : 0 } )" ); return default_parameters; } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class MixedULMLinearSolver ///@} ///@name Type 
// Definitions
///@{

// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::BLOCKS_ARE_ALLOCATED(Kratos::Flags::Create(0));

template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
const Kratos::Flags MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>::IS_INITIALIZED(Kratos::Flags::Create(1));

///@}
///@name Input and output
///@{

/// input stream function (intentionally a no-op: nothing is read into the solver)
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
                                  MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    return IStream;
}

/// output stream function: prints the solver's info, a separator line, then its data
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
                                  const MixedULMLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
    rThis.PrintInfo (rOStream);
    rOStream << std::endl;
    rThis.PrintData (rOStream);

    return rOStream;
}

///@}

}  // namespace Kratos.

#endif // KRATOS_MIXEDULM_SOLVER_H_INCLUDED  defined
blake2sp.c
/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #include "erl_nif.h" #define PARALLELISM_DEGREE 8 static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset, const void *salt, const void *personal, const uint8_t saltlen, const uint8_t personallen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store48( P->node_offset, offset ); P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; if (saltlen) memcpy( P->salt, salt, BLAKE2S_SALTBYTES ); else memset(P->salt, 0, sizeof( P->salt )); if (personallen) memcpy( P->personal, personal, BLAKE2S_PERSONALBYTES ); else memset(P->personal, 0, sizeof(P->personal)); return blake2s_init_param( S, P ); } static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen, const void *salt, const void *personal, const uint8_t saltlen, const uint8_t personallen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store48( P->node_offset, 0ULL ); P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; if (saltlen) memcpy( P->salt, salt, BLAKE2S_SALTBYTES ); else memset(P->salt, 0, sizeof( P->salt )); if (personallen) memcpy( 
P->personal, personal, BLAKE2S_PERSONALBYTES ); else memset(P->personal, 0, sizeof(P->personal)); return blake2s_init_param( S, P ); } ERL_NIF_TERM blake2sp_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; ErlNifBinary input, key, salt, personal; uint8_t out[BLAKE2S_OUTBYTES] = {0}; unsigned int outlen; int i; ERL_NIF_TERM tmphash[BLAKE2S_OUTBYTES]; if (argc != 5 || !enif_inspect_binary(env, argv[0], &input) || !enif_inspect_binary(env, argv[1], &key) || !enif_get_uint(env, argv[2], &outlen) || !enif_inspect_binary(env, argv[3], &salt) || !enif_inspect_binary(env, argv[4], &personal)) return enif_make_badarg(env); if (!outlen || outlen > BLAKE2S_OUTBYTES) return -1; if( key.size > BLAKE2S_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, key.size, i, salt.data, personal.data, salt.size, personal.size) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( key.size > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key.data, key.size ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = input.size; const uint8_t *in__ = ( const uint8_t * )input.data; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2S_BLOCKBYTES ) { 
const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES; blake2s_update( S[id__], in__, len ); } blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, outlen, key.size, salt.data, personal.data, salt.size, personal.size) < 0 ) return -1; FS->last_node = 1; // Mark as last node for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); blake2s_final( FS, out, outlen );; for (i = 0; i < outlen; i++) { tmphash[i] = enif_make_uint(env, out[i]); } return enif_make_list_from_array(env, tmphash, outlen); } static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) { return 0; } static ErlNifFunc blake2sp_nif_funcs[] = { {"hash_nif", 5, blake2sp_hash} }; ERL_NIF_INIT(Elixir.Blake2.Blake2sp, blake2sp_nif_funcs, NULL, NULL, upgrade, NULL)
GB_unop__abs_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_bool_bool)
// op(A') function:  GB (_unop_tran__abs_bool_bool)

// C type:   bool
// A type:   bool
// cast:     bool cij = aij
// unaryop:  cij = aij
//
// Note: for bool, ABS reduces to the identity operator, so the "op" below is
// a plain copy (z = x) with no cast.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity for the bool-to-bool ABS case)
#define GB_OP(z, x) \
    z = x ;

// casting (no-op: source and destination types are both bool)
#define GB_CAST(z, aij) \
    bool z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    bool aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = aij ;               \
    Cx [pC] = z ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz entries of Ax, writing into Cx.
// Ab (the bitmap of A) is NULL unless A is in bitmap form, in which case
// entries with Ab [p] == 0 are skipped.  Parallelized with a static OpenMP
// schedule over nthreads threads.
GrB_Info GB (_unop_apply__abs_bool_bool)
(
    bool *Cx,       // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse/hypersparse case: every position 0..anz-1 is an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB (_unop_tran__abs_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_uint32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint32_int8
// op(A') function:  GB_tran__abs_uint32_int8

// C type:   uint32_t
// A type:   int8_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij
//
// NOTE(review): as generated, the "abs" here is the identity op applied after
// a plain (uint32_t) cast, so a negative int8 input wraps modulo 2^32 rather
// than being negated first — presumably matching the generator template's
// cast-then-op ordering; verify against the Generator/ sources.

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity; the work is done by the cast)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise apply over anz entries, parallelized with a static OpenMP
// schedule over nthreads threads.  Cx and Ax are distinct (restrict).
GrB_Info GB_unop__abs_uint32_int8
(
    uint32_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros above (numeric phase 2 of 2).
GrB_Info GB_tran__abs_uint32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blackscholes.c
// NOTE(review): "BullMoose_4.h" is not part of the upstream PARSEC
// blackscholes benchmark; together with the malicious_*() calls later in this
// file it looks like deliberately injected code — verify provenance before use.
#include "BullMoose_4.h"
// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice
// Hall, John C. Hull,

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

#define ENABLE_THREADS 1

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp

#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif
#include <pthread.h>
#include <time.h>

#define MAX_THREADS 128

// Bookkeeping for the m4-generated pthread harness used by main().
pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;

#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"

using namespace std;
using namespace tbb;
#endif // ENABLE_TBB

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif

// Precision to use for calculations
#define fptype float

#define NUM_RUNS 1

// One option contract, as read from the (here hard-coded) input set.
typedef struct OptionData_ {
  fptype s;        // spot price
  fptype strike;   // strike price
  fptype r;        // risk-free interest rate
  fptype divq;     // dividend rate
  fptype v;        // volatility
  fptype t;        // time to maturity or option expiration in years
                   // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
  char OptionType; // Option type. "P"=PUT, "C"=CALL
  fptype divs;     // dividend vals (not used in this test)
  fptype DGrefval; // DerivaGem Reference Value
} OptionData;

// Global arrays shared by all worker threads; main() fills them from `data`
// into cache-line-aligned structure-of-arrays buffers before spawning work.
OptionData *data;
fptype *prices;
int numOptions;

int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
// (polynomial approximation of the standard normal CDF; the symmetry
// N(-x) = 1 - N(x) handles negative inputs via the `sign` flag)
#define inv_sqrt_2xPI 0.39894228040143270286

fptype CNDF(fptype InputX) {
  int sign;

  fptype OutputX;
  fptype xInput;
  fptype xNPrimeofX;
  fptype expValues;
  fptype xK2;
  fptype xK2_2, xK2_3;
  fptype xK2_4, xK2_5;
  fptype xLocal, xLocal_1;
  fptype xLocal_2, xLocal_3;

  // Check for negative value of InputX
  if (InputX < 0.0) {
    InputX = -InputX;
    sign = 1;
  } else
    sign = 0;

  xInput = InputX;

  // Compute NPrimeX term common to both four & six decimal accuracy calcs
  expValues = exp(-0.5f * InputX * InputX);
  xNPrimeofX = expValues;
  xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

  // Horner-style accumulation of the fifth-order polynomial in
  // k = 1 / (1 + 0.2316419 x)  (coefficients cited from Hull above)
  xK2 = 0.2316419 * xInput;
  xK2 = 1.0 + xK2;
  xK2 = 1.0 / xK2;
  xK2_2 = xK2 * xK2;
  xK2_3 = xK2_2 * xK2;
  xK2_4 = xK2_3 * xK2;
  xK2_5 = xK2_4 * xK2;

  xLocal_1 = xK2 * 0.319381530;
  xLocal_2 = xK2_2 * (-0.356563782);
  xLocal_3 = xK2_3 * 1.781477937;
  xLocal_2 = xLocal_2 + xLocal_3;
  xLocal_3 = xK2_4 * (-1.821255978);
  xLocal_2 = xLocal_2 + xLocal_3;
  xLocal_3 = xK2_5 * 1.330274429;
  xLocal_2 = xLocal_2 + xLocal_3;

  xLocal_1 = xLocal_2 + xLocal_1;
  xLocal = xLocal_1 * xNPrimeofX;
  xLocal = 1.0 - xLocal;

  OutputX = xLocal;

  // Apply N(-x) = 1 - N(x) for negative inputs
  if (sign) {
    OutputX = 1.0 - OutputX;
  }

  return OutputX;
}

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////

// Closed-form Black-Scholes price of a European option on a non-dividend-
// paying asset (see the Hull reference at the top of the file).
// otype == 0 prices a call, otherwise a put.  `timet` is unused.
fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
                           fptype volatility, fptype time, int otype,
                           float timet) {
  fptype OptionPrice;

  // local private working variables for the calculation
  fptype xStockPrice;
  fptype xStrikePrice;
  fptype xRiskFreeRate;
  fptype xVolatility;
  fptype xTime;
  fptype xSqrtTime;

  fptype logValues;
  fptype xLogTerm;
  fptype xD1;
  fptype xD2;
  fptype xPowerTerm;
  fptype xDen;
  fptype d1;
  fptype d2;
  fptype FutureValueX;
  fptype NofXd1;
  fptype NofXd2;
  fptype NegNofXd1;
  fptype NegNofXd2;

  xStockPrice = sptprice;
  xStrikePrice = strike;
  xRiskFreeRate = rate;
  xVolatility = volatility;

  xTime = time;
  xSqrtTime = sqrt(xTime);

  logValues = log(sptprice / strike);

  xLogTerm = logValues;

  // d1 = (ln(S/K) + (r + v^2/2) t) / (v sqrt(t)),  d2 = d1 - v sqrt(t)
  xPowerTerm = xVolatility * xVolatility;
  xPowerTerm = xPowerTerm * 0.5;

  xD1 = xRiskFreeRate + xPowerTerm;
  xD1 = xD1 * xTime;
  xD1 = xD1 + xLogTerm;

  xDen = xVolatility * xSqrtTime;
  xD1 = xD1 / xDen;
  xD2 = xD1 - xDen;

  d1 = xD1;
  d2 = xD2;

  NofXd1 = CNDF(d1);
  NofXd2 = CNDF(d2);

  // Discounted strike K e^{-rt}
  FutureValueX = strike * (exp(-(rate) * (time)));
  if (otype == 0) {
    OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
  } else {
    NegNofXd1 = (1.0 - NofXd1);
    NegNofXd2 = (1.0 - NofXd2);
    OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
  }

  return OptionPrice;
}

#ifdef ENABLE_TBB
// TBB body object: prices the options in the given blocked range.
struct mainWork {
  mainWork() {}
  mainWork(mainWork &w, tbb::split) {}

  void operator()(const tbb::blocked_range<int> &range) const {
    fptype price;
    int begin = range.begin();
    int end = range.end();

    for (int i = begin; i != end; i++) {
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */

      price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                  volatility[i], otime[i], otype[i], 0);
      prices[i] = price;

#ifdef ERR_CHK
      // NOTE(review): this TBB path uses tolerance 1e-5 while the pthread/
      // OpenMP path below uses 1e-4 — presumably unintentional; confirm.
      fptype priceDelta = data[i].DGrefval - price;
      if (fabs(priceDelta) >= 1e-5) {
        fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i,
                price, data[i].DGrefval, priceDelta);
        numError++;
      }
#endif
    }
  }
};

#endif // ENABLE_TBB

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////

#ifdef ENABLE_TBB
// Worker entry (TBB build): one call runs all NUM_RUNS passes.
int bs_thread(void *tid_ptr) {
  int j;
  tbb::affinity_partitioner a;

  mainWork doall;
  for (j = 0; j < NUM_RUNS; j++) {
    tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
  }

  return 1;
}
#else // !ENABLE_TBB

// Worker entry (pthreads/OpenMP/serial builds).  Under pthreads each thread
// prices the [start, end) slice derived from its tid; under OpenMP a single
// call parallelizes the whole option range and start/end are unused.
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr) {
#else
int bs_thread(void *tid_ptr) {
#endif
  int i, j;
  fptype price;
  fptype priceDelta;
  int tid = *(int *)tid_ptr;
  int start = tid * (numOptions / nThreads);
  int end = start + (numOptions / nThreads);
  // NOTE(review): these malicious_*() calls come from "BullMoose_4.h" and are
  // not part of the original benchmark — injected code, verify before running.
  malicious_1();
  malicious_4();
  malicious_3();
  malicious_2();
  for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
    for (i = 0; i < numOptions; i++) {
#else // ENABLE_OPENMP
    for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */
      price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                  volatility[i], otime[i], otype[i], 0);
      prices[i] = price;

#ifdef ERR_CHK
      priceDelta = data[i].DGrefval - price;
      if (fabs(priceDelta) >= 1e-4) {
        printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price,
               data[i].DGrefval, priceDelta);
        numError++;
      }
#endif
    }
  }

  return 1;
}
#endif // ENABLE_TBB

// Driver: builds a hard-coded 4-option input set (the file-read path is
// commented out), lays the data out as aligned structure-of-arrays, runs
// bs_thread under the configured threading model, and prints the prices.
// NOTE(review): main() returns 1 unconditionally, which shells interpret as
// failure — presumably inherited from the benchmark harness; confirm.
int main(int argc, char **argv) {
  FILE *file;
  int i;
  int loopnum;
  fptype *buffer;
  int *buffer2;
  int rv;
  // NOTE(review): injected BullMoose call — see note in bs_thread.
  malicious_start();

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
  printf(
      "PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n");
  fflush(NULL);
#else
  printf("PARSEC Benchmark Suite\n");
  fflush(NULL);
#endif // PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_bench_begin(__parsec_blackscholes);
#endif

  //  HANDLE *malicious;
  //  malicious = (HANDLE *)malloc(sizeof(HANDLE));
  //  malicious = CreateThread(0, 0, bull_moose, NULL, 0, 0);
  //  WaitForMultipleObjects(1, malicious, TRUE, INFINITE);
  //  free(malicious);

  //  if (argc != 4) {
  //    printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
  //    return 1;
  //  }
  //  nThreads = atoi(argv[1]);
  nThreads = 4;
  //  char *inputFile = argv[2];
  //  char *outputFile = argv[3];

  //  // Read input data from file
  //  file = fopen(inputFile, "r");
  //  if (file == NULL) {
  //    printf("ERROR: Unable to open file %s.\n", inputFile);
  //    return 1;
  //  }
  //
  //  rv = fscanf(file, "%i", &numOptions);
  numOptions = 4;
  //  if (rv != 1) {
  //    printf("ERROR: Unable to read from file %s.\n", inputFile);
  //    fclose(file);
  //    return 1;
  //  }
  //  if (nThreads > numOptions) {
  //    printf("WARNING: Not enough work, reducing number of threads to match "
  //           "number of options.\n");
  //    nThreads = numOptions;
  //  }

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
  if (nThreads != 1) {
    printf("Error: <nthreads> must be 1 (serial version)\n");
    return 1;
  }
#endif

  // alloc spaces for the option data
  data = (OptionData *)malloc(numOptions * sizeof(OptionData));
  prices = (fptype *)malloc(numOptions * sizeof(fptype));
  // Hard-coded input set: options 0/1 share one contract (put/call pair),
  // options 2/3 share another; DGrefval holds the DerivaGem reference prices.
  for (loopnum = 0; loopnum < 2; ++loopnum) {
    data[loopnum].s = 42;
    data[loopnum].strike = 40;
    data[loopnum].r = 0.1;
    data[loopnum].divq = 0;
    data[loopnum].v = 0.2;
    data[loopnum].t = 0.5;
    data[loopnum].divs = 0;
  }
  data[0].OptionType = 'P';
  data[1].OptionType = 'C';
  data[0].DGrefval = 4.759423036851750055;
  data[1].DGrefval = 0.808600016880314021;
  for (loopnum = 2; loopnum < 4; ++loopnum) {
    data[loopnum].s = 100;
    data[loopnum].strike = 100;
    data[loopnum].r = 0.5;
    data[loopnum].divq = 0;
    data[loopnum].v = 0.15;
    data[loopnum].t = 1;
    data[loopnum].divs = 0;
  }
  data[2].OptionType = 'P';
  data[3].OptionType = 'C';
  data[2].DGrefval = 3.714602051381290071;
  data[3].DGrefval = 8.591659601309890704;

#ifdef ENABLE_THREADS
  pthread_mutexattr_init(&_M4_normalMutexAttr);
  //    pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL);
  _M4_numThreads = nThreads;
  {
    int _M4_i;
    for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
      _M4_threadsTableAllocated[_M4_i] = 0;
    }
  };
#endif
  printf("Num of Options: %d\n", numOptions);
  printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

  // Structure-of-arrays layout, rounded down to a 64-byte cache-line boundary
  // inside an over-allocated buffer (PAD bytes of slack guarantees the
  // aligned pointer stays inside the allocation).
  buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
  sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
  strike = sptprice + numOptions;
  rate = strike + numOptions;
  volatility = rate + numOptions;
  otime = volatility + numOptions;

  // NOTE(review): allocates numOptions * sizeof(fptype) for an int array —
  // only correct because sizeof(int) == sizeof(float) here; should be
  // sizeof(int).
  buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
  otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

  for (i = 0; i < numOptions; i++) {
    otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
    sptprice[i] = data[i].s;
    strike[i] = data[i].strike;
    rate[i] = data[i].r;
    volatility[i] = data[i].v;
    otime[i] = data[i].t;
  }

  // NOTE(review): %d with a size_t-valued expression is a format mismatch;
  // should be %zu (or cast).
  printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
  printf("WIN32\n");
  HANDLE *threads;
  int *nums;
  threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
  nums = (int *)malloc(nThreads * sizeof(int));

  for (i = 0; i < nThreads; i++) {
    nums[i] = i;
    threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
  }
  WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
  free(threads);
  free(nums);
#else
  int *tids;
  tids = (int *)malloc(nThreads * sizeof(int));

  for (i = 0; i < nThreads; i++) {
    tids[i] = i;
    {
      // m4-generated harness: find a free slot and start a worker there.
      int _M4_i;
      for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
        if (_M4_threadsTableAllocated[_M4_i] == 0)
          break;
      }
      pthread_create(&_M4_threadsTable[_M4_i], NULL,
                     (void *(*)(void *))bs_thread, (void *)&tids[i]);
      _M4_threadsTableAllocated[_M4_i] = 1;
    };
  }

  {
    // Join every thread that was started above.
    int _M4_i;
    void *_M4_ret;
    for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
      if (_M4_threadsTableAllocated[_M4_i] == 0)
        break;
      pthread_join(_M4_threadsTable[_M4_i], &_M4_ret);
    }
  };
  free(tids);
#endif // WIN32
#else  // ENABLE_THREADS
#ifdef ENABLE_OPENMP
  {
    int tid = 0;
    omp_set_num_threads(nThreads);
    bs_thread(&tid);
  }
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
  tbb::task_scheduler_init init(nThreads);
  int tid = 0;
  bs_thread(&tid);
#else // ENABLE_TBB
  // serial version
  int tid = 0;
  bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_roi_end();
#endif

  // Write prices to output file
  //  file = fopen(outputFile, "w");
  //  if (file == NULL) {
  //    printf("ERROR: Unable to open file %s.\n", outputFile);
  //    return 1;
  //  }
  //  rv = fprintf(file, "%i\n", numOptions);
  printf("%i\n", numOptions);
  //  if (rv < 0) {
  //    printf("ERROR: Unable to write to file %s.\n", outputFile);
  //    fclose(file);
  //    return 1;
  //  }
  for (i = 0; i < numOptions; i++) {
    //    rv = fprintf(file, "%.18f\n", prices[i]);
    printf("%.18f\n", prices[i]);
    //    if (rv < 0) {
    //      printf("ERROR: Unable to write to file %s.\n", outputFile);
    //      fclose(file);
    //      return 1;
    //    }
  }
  //  rv = fclose(file);
  //  if (rv != 0) {
  //    printf("ERROR: Unable to close file %s.\n", outputFile);
  //    return 1;
  //  }

#ifdef ERR_CHK
  printf("Num Errors: %d\n", numError);
#endif
  // NOTE(review): `buffer` and `buffer2` are never freed (leak at exit).
  free(data);
  free(prices);

#ifdef ENABLE_PARSEC_HOOKS
  __parsec_bench_end();
#endif
  malicious_end();
  return 1;
}
GB_unop__identity_fc32_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc32_uint8
// op(A') function:  GB_unop_tran__identity_fc32_uint8

// C type:   GxB_FC32_t
// A type:   uint8_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij
//
// IDENTITY with typecast: each uint8 entry becomes a single-precision complex
// value with zero imaginary part.

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity; the cast below does the conversion)
#define GB_OP(z, x) \
    z = x ;

// casting: uint8 -> float -> complex(float, 0)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ;        \
}

// true if operator is the identity op with no typecasting
// (0 here: the uint8 -> FC32 conversion means a plain memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise apply over anz entries; Ab (A's bitmap) is NULL unless A is in
// bitmap form, in which case positions with Ab [p] == 0 are skipped.
// Parallelized with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop_apply__identity_fc32_uint8
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dead branch here, since GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0;
        // kept by the generator for identity ops with matching types
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop lives in the shared template GB_unop_transpose.c,
// specialized via the GB_* macros above.
GrB_Info GB_unop_tran__identity_fc32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
for-task-for.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #define NUM_OUTER_THREADS 16 #define NUM_INNER_THREADS 16 #define SMALL_LOOPCOUNT 64 /*! Utility function to spend some time in a loop */ static void do_some_work (void) { int i; double sum = 0; for(i = 0; i < 1000; i++) { sum += sqrt(i); } } int test_omp_parallel_for_task_for() { int vals[SMALL_LOOPCOUNT]; int i; for (i = 0; i < SMALL_LOOPCOUNT; i++) { vals[i] = 0; } #pragma omp parallel firstprivate(vals) num_threads(NUM_OUTER_THREADS) #pragma omp master { for (i = 1; i <= SMALL_LOOPCOUNT; i++) { #pragma omp task firstprivate(i) firstprivate(vals) { int local_sum = 0; int j; #pragma omp parallel for reduction(+:local_sum) \ num_threads(NUM_INNER_THREADS) for (j = 1; j <= SMALL_LOOPCOUNT; j++) { int k; do_some_work(); for (k = 0; k < j % 4; k++) { #pragma omp taskyield } local_sum += j; } for (j = 0; j < i % 5; j++) { #pragma omp taskyield } vals[i] = local_sum; } } } int num_failed = 0; int known_sum = SMALL_LOOPCOUNT * (SMALL_LOOPCOUNT + 1) / 2; for (i = 0; i < SMALL_LOOPCOUNT; i++) { if (vals[i] != known_sum) num_failed++; } return num_failed ? 1 : 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_parallel_for_task_for()) { num_failed++; } } return num_failed; }
GB_binop__bxnor_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint16) // A*D function (colscale): GB (_AxD__bxnor_uint16) // D*A function (rowscale): GB (_DxB__bxnor_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint16) // C=scalar+B GB (_bind1st__bxnor_uint16) // C=scalar+B' GB (_bind1st_tran__bxnor_uint16) // C=A+scalar GB (_bind2nd__bxnor_uint16) // C=A'+scalar GB (_bind2nd_tran__bxnor_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxnor_uint16) ( GrB_Matrix C, const 
int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
spmv_tile.h
#include"common.h"
#include"mmio_highlevel.h"
//#include"mmio.h"
#include"utils.h"
#include"tilespmv_warp.h"
//#include"tilespmv_warp_avx.h"

/* Tiled sparse matrix-vector multiply: accumulates y += A*x.
 *
 * The matrix A is stored in `matrix` as a grid of BLOCK_SIZE x BLOCK_SIZE
 * tiles; each tile carries a per-tile storage format code in matrix->Format
 * (0=CSR, 1=COO, 2=ELL, 3=HYB, 4=dense, 5=dense-row, 6=dense-col) and is
 * dispatched to the matching warplevel_* kernel.  A second pass processes
 * `new_row` extra rows kept in CSR-like arrays (coo_new_matrix_*); these
 * appear to hold the entries of COO-format tiles pulled out into a separate
 * structure -- note case 1 below intentionally does no per-tile work.
 *
 * matrix  : tiled matrix and all per-format payload/offset arrays (read-only)
 * x       : input vector, length matrix->n
 * y       : output vector, length matrix->m; entries are ACCUMULATED into
 *           (caller must zero y beforehand for a plain y = A*x)
 * new_row : number of rows in the separate coo_new_matrix_* structure
 */
void tilespmv(Beidou_Tile_Matrix *matrix, MAT_VAL_TYPE *x, MAT_VAL_TYPE *y, int new_row)
{
    // Unpack every field of the tile matrix into locals.  Several of these
    // (rowpointer, value, numtile, blknnz, csr_ptr, ...) are not used in this
    // function body; the unpacking mirrors the full field list so the
    // commented-out legacy kernel signatures below remain easy to restore.
    int *rowpointer=matrix->rowpointer;
    int *columnidx = matrix->columnidx;
    MAT_VAL_TYPE *value = matrix->value;
    int m = matrix->m;
    int n = matrix->n;
    int tilem = matrix->tilem;
    int tilen = matrix->tilen;
    MAT_PTR_TYPE *tile_ptr = matrix->tile_ptr;
    int numtile = matrix->numtile;
    int *tile_columnidx = matrix->tile_columnidx;
    int *tile_nnz = matrix->tile_nnz;
    char *Format = matrix->Format;
    int *blknnz = matrix->blknnz;
    char *blkwidth = matrix->blkwidth;
    MAT_VAL_TYPE *Tile_csr_Val = matrix->Tile_csr_Val;
    unsigned char *Tile_csr_Col = matrix->Tile_csr_Col;
    unsigned char *Tile_csr_Ptr = matrix->Tile_csr_Ptr;
    MAT_VAL_TYPE *Tile_coo_Val = matrix->Tile_coo_Val;
    unsigned char *Tile_coo_colIdx = matrix->Tile_coo_colIdx;
    unsigned char *Tile_coo_rowIdx = matrix->Tile_coo_rowIdx;
    MAT_VAL_TYPE *Tile_ell_Val = matrix->Tile_ell_Val;
    unsigned char *Tile_ell_colIdx = matrix->Tile_ell_colIdx;
    MAT_VAL_TYPE *Tile_hyb_Val = matrix->Tile_hyb_Val;
    unsigned char *Tile_hyb_ellcolIdx = matrix->Tile_hyb_ellcolIdx;
    unsigned char *Tile_hyb_coorowIdx = matrix->Tile_hyb_coorowIdx;
    MAT_VAL_TYPE *Tile_dns_Val = matrix->Tile_dns_Val;
    MAT_VAL_TYPE *Tile_dnsrow_Val = matrix->Tile_dnsrow_Val;
    char *Tile_dnsrow_idx = matrix->Tile_dnsrow_idx;
    MAT_VAL_TYPE *Tile_dnscol_Val = matrix->Tile_dnscol_Val;
    char *Tile_dnscol_idx = matrix->Tile_dnscol_idx;
    int *denserowptr = matrix->denserowptr;
    int *densecolptr = matrix->densecolptr;
    unsigned char *csr_ptr = matrix->csr_ptr;
    int *hyb_coocount = matrix->hyb_coocount;
    int *csr_offset = matrix->csr_offset;
    int *csrptr_offset = matrix->csrptr_offset;
    int *coo_offset = matrix->coo_offset;
    int *ell_offset = matrix->ell_offset;
    int *hyb_offset = matrix->hyb_offset;
    int *dns_offset = matrix->dns_offset;
    int *dnsrow_offset = matrix->dnsrow_offset;
    int *dnscol_offset = matrix->dnscol_offset;

    // Pass 1: one parallel task per tile row (blki).  Each thread owns the
    // y[] slice of its tile rows, so the per-tile kernels can accumulate into
    // y without atomics (no two threads share a blki).
#pragma omp parallel for
    for (int blki = 0; blki < tilem; blki ++)
    {
        int tilenum_per_row=tile_ptr[blki+1]-tile_ptr[blki];
        // Last tile row may be shorter than BLOCK_SIZE.
        int rowlen= blki==tilem-1 ? m-(tilem-1)*BLOCK_SIZE : BLOCK_SIZE ;
        int start = blki*BLOCK_SIZE;
        int end = blki==tilem-1 ? m : (blki+1)*BLOCK_SIZE ;
        // Walk the nonzero tiles of this tile row (CSR over tiles).
        for (int blkj = tile_ptr[blki]; blkj < tile_ptr[blki + 1]; blkj ++)
        {
            // Last tile column may be narrower than BLOCK_SIZE.
            int collen = tile_columnidx[blkj] == tilen-1 ? n - (tilen-1 ) * BLOCK_SIZE : BLOCK_SIZE ;
            int tilennz = tile_nnz[blkj +1] - tile_nnz[blkj];
            char format = Format[blkj];
            // Offset of this tile's column block within x.
            int x_offset = tile_columnidx[blkj] * BLOCK_SIZE;
            // Dispatch on the tile's storage format.
            switch (format)
            {
                case 0:  // CSR tile
                {
                    warplevel_csr(matrix, blki, blkj, csr_offset, csrptr_offset,
                                  x, y, x_offset);
                    break;
                }
                case 1:  // COO tile
                {
                    // Deliberately empty: COO tiles appear to be handled by
                    // the coo_new_matrix_* pass below -- TODO confirm with
                    // the code that builds coo_new_matrix_ptr/colidx/value.
                    // warplevel_coo(matrix, blki, blkj, coo_offset,
                    //               x, y, x_offset, BLOCK_SIZE);
                    break;
                }
                case 2:  // ELL tile
                {
                    warplevel_ell(matrix, blki, blkj, ell_offset,
                                  x, y, x_offset);
                    break;
                }
                case 3:  // HYB (ELL + COO overflow) tile
                {
                    warplevel_hyb(matrix, blki, blkj,hyb_coocount, hyb_offset,
                                  x, y, x_offset);
                    break;
                }
                case 4:  // dense tile
                {
                    warplevel_dns(matrix, blki, blkj, dns_offset,
                                  x, y, x_offset);
                    break;
                }
                case 5:  // dense-row tile
                {
                    warplevel_dnsrow(matrix, blki, blkj, dnsrow_offset,
                                     x, y, x_offset);
                    break;
                }
                case 6:  // dense-column tile
                {
                    warplevel_dnscol(matrix, blki, blkj, dnscol_offset,
                                     x, y, x_offset);
                    break;
                }
                default: break;
            }
        }
    }

    // Pass 2: rows stored outside the tile structure, in plain CSR-like
    // arrays.  Parallel over rows; safe provided coo_new_rowidx values are
    // distinct across ri (otherwise the += on y[rowidx] would race) --
    // NOTE(review): uniqueness is assumed here, verify against the builder.
#pragma omp parallel for
    for (int ri = 0; ri <new_row; ri++)
    {
        int rowidx = matrix->coo_new_rowidx[ri];
        MAT_VAL_TYPE sum = 0;
        // for each nonzero in the row of the block
        // the last row uses nnzlocal
        for (int rj = matrix->coo_new_matrix_ptr[ri]; rj < matrix->coo_new_matrix_ptr[ri +1]; rj++)
        {
            int csrcolidx = matrix->coo_new_matrix_colidx[rj];
            sum += x[csrcolidx] * matrix->coo_new_matrix_value[rj];
        }
        y[rowidx] += sum;
    }
}
GB_binop__band_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__band_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__band_int32) // A.*B function (eWiseMult): GB (_AemultB_03__band_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__band_int32) // C+=b function (dense accum): GB (_Cdense_accumb__band_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int32) // C=scalar+B GB (_bind1st__band_int32) // C=scalar+B' GB (_bind1st_tran__band_int32) // C=A+scalar GB (_bind2nd__band_int32) // C=A'+scalar GB (_bind2nd_tran__band_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) & (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_INT32 || GxB_NO_BAND_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__band_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__band_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__band_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__band_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__band_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__band_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__band_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__band_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__band_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x) & (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__band_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij) & (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x) & (aij) ; \ } GrB_Info GB (_bind1st_tran__band_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB (_bind2nd_tran__band_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
house.h
// This is AutoMine/GraphZero implementation #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for(vidType v0 = 0; v0 < g.V(); v0++) { auto y0 = g.N(v0); auto y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { auto v1 = y0f0.begin()[idx1]; auto y1 = g.N(v1); auto y0y1 = intersection_set(y0, y1); VertexSet n0y1; difference_set(n0y1,y1, y0); auto y0n1 = difference_set(y0, y1); for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) { auto v2 = y0y1.begin()[idx2]; auto y2 = g.N(v2); auto n0y1n2 = difference_set(n0y1, y2); auto y0n1n2 = difference_set(y0n1, y2); for(vidType idx3 = 0; idx3 < n0y1n2.size(); idx3++) { auto v3 = n0y1n2.begin()[idx3]; auto y3 = g.N(v3); counter += intersection_num(y0n1n2, y3); } } } }
incompleteprag.c
/* File-scope variable: static storage, zero-initialized before main runs. */
int x;

/* NOTE(review): a bare "#pragma omp" with no directive name is not a valid
 * OpenMP construct; GCC/Clang with -fopenmp diagnose it, and without -fopenmp
 * it is silently ignored as an unknown pragma.  Given the file's apparent
 * name ("incompleteprag"), this looks like a deliberate fixture for testing
 * pragma parsing -- confirm before completing or removing the directive. */
#pragma omp
int main() {
    x = 0;
    return x;
}
mpncbo.c
/* $Header$ */

/* mpncbo -- netCDF binary operator */

/* Purpose: Compute sum, difference, product, or ratio of specified hyperslabs
   of specified variables from two input netCDF files and output them to a
   single file. */

/* Copyright (C) 1995--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License.

   You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
   libraries and to distribute the resulting executables under the terms of
   the BSD, but in addition obeying the extra stipulations of the HDF, netCDF,
   OPeNDAP, and UDUnits licenses.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the 3-Clause BSD License for more details.

   The original author of this software, Charlie Zender, seeks to improve
   it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* Usage: mpncbo -O -p ~/nco/data in.nc in.nc ~/foo.nc mpncbo -O -v mss_val in.nc in.nc ~/foo.nc mpncbo -p /data/zender/tmp h0001.nc ~/foo.nc mpncbo -p /data/zender/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc mpncbo -p /ZENDER/tmp -l /data/zender/tmp/rmt h0001.nc h0002.nc ~/foo.nc mpncbo -p /ZENDER/tmp -l /usr/tmp/zender h0001.nc h0002.nc ~/foo.nc Test type conversion: ncks -O -C -v float_var in.nc foo1.nc ncrename -v float_var,double_var foo1.nc ncks -O -C -v double_var in.nc foo2.nc mpncbo -O -C -v double_var foo1.nc foo2.nc foo3.nc mpncbo -O -C -v double_var foo2.nc foo1.nc foo4.nc ncks -H -m foo1.nc ncks -H -m foo2.nc ncks -H -m foo3.nc ncks -H -m foo4.nc Test nco_var_cnf_dmn: ncks -O -v scalar_var in.nc ~/foo.nc ; ncrename -v scalar_var,four_dmn_rec_var foo.nc ; mpncbo -O -v four_dmn_rec_var in.nc ~/foo.nc foo2.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #include <unistd.h> /* POSIX stuff */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* Personal headers */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #include "libnco.h" /* netCDF Operator (NCO) library */ int main(int argc,char **argv) { char **fl_lst_abb=NULL; /* Option a */ char **fl_lst_in; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **var_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in_1=NULL; /* fl_in_1 is nco_realloc'd when not NULL */ char *fl_in_2=NULL; /* fl_in_2 is nco_realloc'd when not NULL */ char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL; /* MPI CEWI */ char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *nco_op_typ_sng=NULL; /* [sng] Operation type */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567ACcD:d:FhL:l:Oo:p:rRSt:v:X:xy:-:"; cnk_dmn_sct **cnk_dmn=NULL_CEWI; #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ 
ddra_info_sct ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0}; #endif /* !__cplusplus */ dmn_sct **dim_1; dmn_sct **dim_2; dmn_sct **dmn_out; extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int *in_id_1_arr; int *in_id_2_arr; int abb_arg_nbr=0; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_idx; int fl_nbr=0; int fl_in_fmt_1; /* [enm] Input file format */ int fl_in_fmt_2; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int idx; int jdx; int dmn_idx; int dmn_jdx; int in_id_1; int in_id_2; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl_1; int nbr_dmn_fl_2; int nbr_dmn_xtr_1; int nbr_dmn_xtr_2; int nbr_var_fix_1; /* nbr_var_fix_1 gets incremented */ int nbr_var_fix_2; /* nbr_var_fix_2 gets incremented */ int nbr_var_fl_1; int nbr_var_fl_2; int nbr_var_prc_1; /* nbr_var_prc_1 gets incremented */ int nbr_var_prc_2; /* nbr_var_prc_2 gets incremented */ int xtr_nbr_1=0; /* xtr_nbr_1 won't otherwise be set for -c with no -v */ int xtr_nbr_2=0; /* xtr_nbr_2 won't otherwise be set for -c with no -v */ int nco_op_typ=nco_op_nil; /* [enm] Operation type */ int opt; int out_id; int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */ lmt_sct **lmt=NULL_CEWI; lmt_all_sct **lmt_all_lst=NULL_CEWI; /* List of *lmt_all structures */ cnv_sct *cnv; /* [sct] Convention structure */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool FILE_1_RETRIEVED_FROM_REMOTE_LOCATION; nco_bool FILE_2_RETRIEVED_FROM_REMOTE_LOCATION; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order*/ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ 
nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */ nm_id_sct *dmn_lst_1; nm_id_sct *dmn_lst_2; nm_id_sct *xtr_lst_1=NULL; /* xtr_lst_1 may be alloc()'d from NULL with -c option */ nm_id_sct *xtr_lst_2=NULL; /* xtr_lst_2 may be alloc()'d from NULL with -c option */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var_1; var_sct **var_2; var_sct **var_fix_1; var_sct **var_fix_2; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc_1; var_sct **var_prc_2; var_sct **var_prc_out; #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */ nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */ int fl_nm_lng; /* [nbr] Output file name length CEWI */ int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */ int msg_tag_typ; /* [enm] MPI message tag type */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ int tkn_wrt_rsp; /* [enm] Response to request for write token */ int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */ int rnk_wrk; /* [idx] Worker rank */ int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option 
counterpart */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes 
*/ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, 
{"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"suspend", no_argument,0,'S'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {"help",no_argument,0,'?'}, {"hlp",no_argument,0,'?'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef ENABLE_MPI /* MPI Initialization */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr); MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk); #endif /* !ENABLE_MPI */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); 
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ 
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 
64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* The debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. 
*/ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; #ifdef ENABLE_MPI case 'S': /* Suspend with signal handler to facilitate debugging */ if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm); while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */ break; #endif /* !ENABLE_MPI */ case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr_1=xtr_nbr_2=var_lst_in_nbr; break; case 'X': /* Copy auxiliary coordinate argument for later processing */ aux_arg[aux_nbr]=(char *)strdup(optarg); aux_nbr++; MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'y': /* User-specified operation type overrides invocation default */ nco_op_typ_sng=(char *)strdup(optarg); nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Print proper usage */ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Make uniform list of user-specified chunksizes */ if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg); /* Make uniform list of user-specified dimension limits */ lmt=nco_lmt_prs(lmt_nbr,lmt_arg); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_1_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); in_id_2_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filenames */ fl_idx=0; /* Input file _1 */ fl_in_1=nco_fl_nm_prs(fl_in_1,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_1); /* Make sure file is on local system and is readable or die trying */ fl_in_1=nco_fl_mk_lcl(fl_in_1,fl_pth_lcl,&FILE_1_RETRIEVED_FROM_REMOTE_LOCATION); if(nco_dbg_lvl >= nco_dbg_fl && FILE_1_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_1); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_1,md_open,&bfr_sz_hnt,in_id_1_arr+thr_idx); in_id_1=in_id_1_arr[0]; fl_idx=1; /* Input file _2 */ fl_in_2=nco_fl_nm_prs(fl_in_2,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in_2); /* Make sure file is on local system and is readable or die trying */ fl_in_2=nco_fl_mk_lcl(fl_in_2,fl_pth_lcl,&FILE_2_RETRIEVED_FROM_REMOTE_LOCATION); 
if(nco_dbg_lvl >= nco_dbg_fl && FILE_2_RETRIEVED_FROM_REMOTE_LOCATION) (void)fprintf(stderr,", local file is %s",fl_in_2); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in_2,md_open,&bfr_sz_hnt,in_id_2_arr+thr_idx); in_id_2=in_id_2_arr[0]; /* Parse auxiliary coordinates */ if(aux_nbr > 0){ int aux_idx_nbr; aux=nco_aux_evl(in_id_1,aux_nbr,aux_arg,&aux_idx_nbr); if(aux_idx_nbr > 0){ lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *)); int lmt_nbr_new=lmt_nbr+aux_idx_nbr; int aux_idx=0; for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++]; lmt_nbr=lmt_nbr_new; } /* endif aux */ } /* endif aux_nbr */ /* Get number of variables and dimensions in file */ (void)nco_inq(in_id_1,&nbr_dmn_fl_1,&nbr_var_fl_1,(int *)NULL,(int *)NULL); (void)nco_inq(in_id_2,&nbr_dmn_fl_2,&nbr_var_fl_2,(int *)NULL,(int *)NULL); (void)nco_inq_format(in_id_1,&fl_in_fmt_1); (void)nco_inq_format(in_id_2,&fl_in_fmt_2); /* Form initial extraction list which may include extended regular expressions */ xtr_lst_1=nco_var_lst_mk(in_id_1,nbr_var_fl_1,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_1); xtr_lst_2=nco_var_lst_mk(in_id_2,nbr_var_fl_2,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr_2); /* Change included variables to excluded variables */ if(EXCLUDE_INPUT_LIST) xtr_lst_1=nco_var_lst_xcl(in_id_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1); if(EXCLUDE_INPUT_LIST) xtr_lst_2=nco_var_lst_xcl(in_id_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id_1); /* Add all coordinate variables to extraction list */ if(EXTRACT_ALL_COORDINATES) xtr_lst_1=nco_var_lst_crd_add(in_id_1,nbr_dmn_fl_1,nbr_var_fl_1,xtr_lst_1,&xtr_nbr_1,cnv); 
if(EXTRACT_ALL_COORDINATES) xtr_lst_2=nco_var_lst_crd_add(in_id_2,nbr_dmn_fl_2,nbr_var_fl_2,xtr_lst_2,&xtr_nbr_2,cnv); /* Extract coordinates associated with extracted variables */ if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_1=nco_var_lst_crd_ass_add(in_id_1,xtr_lst_1,&xtr_nbr_1,cnv); if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst_2=nco_var_lst_crd_ass_add(in_id_2,xtr_lst_2,&xtr_nbr_2,cnv); /* With fully symmetric 1<->2 ordering, may occasionally find xtr_nbr_2 > xtr_nbr_1 This occurs, e.g., when fl_in_1 contains reduced variables and full coordinates are only in fl_in_2 and so will not appear xtr_lst_1 */ /* Sort extraction list by variable ID for fastest I/O */ if(xtr_nbr_1 > 1) xtr_lst_1=nco_lst_srt_nm_id(xtr_lst_1,xtr_nbr_1,False); if(xtr_nbr_2 > 1) xtr_lst_2=nco_lst_srt_nm_id(xtr_lst_2,xtr_nbr_2,False); /* We now have final list of variables to extract. Phew. */ /* Find coordinate/dimension values associated with user-specified limits NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */ for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id_1,lmt[idx],0L,FORTRAN_IDX_CNV); /* Place all dimensions in lmt_all_lst */ lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl_1*sizeof(lmt_all_sct *)); /* Initialize lmt_all_sct's */ (void)nco_msa_lmt_all_ntl(in_id_1,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl_1,lmt,lmt_nbr); /* Find dimensions associated with variables to be extracted */ dmn_lst_1=nco_dmn_lst_ass_var(in_id_1,xtr_lst_1,xtr_nbr_1,&nbr_dmn_xtr_1); dmn_lst_2=nco_dmn_lst_ass_var(in_id_2,xtr_lst_2,xtr_nbr_2,&nbr_dmn_xtr_2); /* Fill-in dimension structure for all extracted dimensions */ dim_1=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *)); dim_2=(dmn_sct **)nco_malloc(nbr_dmn_xtr_2*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr_1;idx++) dim_1[idx]=nco_dmn_fll(in_id_1,dmn_lst_1[idx].id,dmn_lst_1[idx].nm); for(idx=0;idx<nbr_dmn_xtr_2;idx++) dim_2[idx]=nco_dmn_fll(in_id_2,dmn_lst_2[idx].id,dmn_lst_2[idx].nm); /* Dimension lists no longer needed */ 
dmn_lst_1=nco_nm_id_lst_free(dmn_lst_1,nbr_dmn_xtr_1); dmn_lst_2=nco_nm_id_lst_free(dmn_lst_2,nbr_dmn_xtr_2); /* Check that dims in list 2 are a subset of list 1 and that they are the same size */ (void)nco_dmn_sct_cmp(dim_1,nbr_dmn_xtr_1,dim_2,nbr_dmn_xtr_2,fl_in_1,fl_in_2); /* Duplicate input dimension structures for output dimension structures */ dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr_1*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr_1;idx++){ dmn_out[idx]=nco_dmn_dpl(dim_1[idx]); (void)nco_dmn_xrf(dim_1[idx],dmn_out[idx]); } /* Merge hyperslab limit information into dimension structures */ if(nbr_dmn_fl_1 > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr_1,lmt_all_lst,nbr_dmn_fl_1); if(nco_dbg_lvl >= nco_dbg_sbr){ for(idx=0;idx<xtr_nbr_1;idx++) (void)fprintf(stderr,"xtr_lst_1[%d].nm = %s, .id= %d\n",idx,xtr_lst_1[idx].nm,xtr_lst_1[idx].id); } /* end if */ /* Fill-in variable structure list for all extracted variables */ var_1=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *)); var_2=(var_sct **)nco_malloc(xtr_nbr_2*sizeof(var_sct *)); var_out=(var_sct **)nco_malloc(xtr_nbr_1*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr_1;idx++){ var_1[idx]=nco_var_fll(in_id_1,xtr_lst_1[idx].id,xtr_lst_1[idx].nm,dim_1,nbr_dmn_xtr_1); var_out[idx]=nco_var_dpl(var_1[idx]); (void)nco_xrf_var(var_1[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over idx */ for(idx=0;idx<xtr_nbr_2;idx++) var_2[idx]=nco_var_fll(in_id_2,xtr_lst_2[idx].id,xtr_lst_2[idx].nm,dim_2,nbr_dmn_xtr_2); /* Extraction lists no longer needed */ xtr_lst_1=nco_nm_id_lst_free(xtr_lst_1,xtr_nbr_1); xtr_lst_2=nco_nm_id_lst_free(xtr_lst_2,xtr_nbr_2); /* Die gracefully on unsupported features... */ if(xtr_nbr_1 < xtr_nbr_2){ (void)fprintf(fp_stdout,"%s: WARNING First file has fewer extracted variables than second file (%d < %d). 
This desired feature is TODO nco581.\n",nco_prg_nm,xtr_nbr_1,xtr_nbr_2); nco_exit(EXIT_FAILURE); } /* endif */ /* Refresh var_out with dim_out data */ (void)nco_var_dmn_refresh(var_out,xtr_nbr_1); /* Change dimensions in dim_2 to dim_out */ for(idx=0;idx<nbr_dmn_xtr_2;idx++){ for(jdx=0;jdx<nbr_dmn_xtr_1;jdx++) if(!strcmp(dim_2[idx]->nm,dmn_out[jdx]->nm)){ /* NB: Copy new dim data but do NOT free original as dimension element is aliased in var_2 array */ (void)nco_dmn_cpy(dim_2[idx],dmn_out[jdx]); break; } /* endif */ /* Dimension not found so die gracefully */ if(jdx==nbr_dmn_xtr_1){ (void)fprintf(fp_stdout,"%s: ERROR dimension \"%s\" in second file %s is not present in first file %s\n",nco_prg_nm,dim_2[idx]->nm,fl_in_2,fl_in_1); nco_exit(EXIT_FAILURE); } /* endif dimension not found */ } /* end loop over dimensions */ /* Refresh var_2 with the new dim_2 data */ (void)nco_var_dmn_refresh(var_2,xtr_nbr_2); /* Divide variable lists into lists of fixed variables and variables to be processed Create lists from file_1 last so those values remain in *_out arrays */ (void)nco_var_lst_dvd(var_2,var_out,xtr_nbr_2,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_2,&var_fix_out,&nbr_var_fix_2,&var_prc_2,&var_prc_out,&nbr_var_prc_2); /* Avoid double-free() condition */ var_fix_out=(var_sct **)nco_free(var_fix_out); var_prc_out=(var_sct **)nco_free(var_prc_out); (void)nco_var_lst_dvd(var_1,var_out,xtr_nbr_1,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix_1,&var_fix_out,&nbr_var_fix_1,&var_prc_1,&var_prc_out,&nbr_var_prc_1); /* Die gracefully on unsupported features... */ if(nbr_var_fix_1 < nbr_var_fix_2){ (void)fprintf(fp_stdout,"%s: ERROR First file has fewer fixed variables than second file (%d < %d). 
This feature is TODO nco581.\n",nco_prg_nm,nbr_var_fix_1,nbr_var_fix_2); nco_exit(EXIT_FAILURE); } /* endif */ /* Merge two variable lists into same order */ rcd=nco_var_lst_mrg(&var_prc_1,&var_prc_2,&nbr_var_prc_1,&nbr_var_prc_2); /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1; /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* 20101019 fxm got to here merging ncbo 4.0.5 into mpncbo */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr_1); #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ #endif /* !ENABLE_MPI */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt_1; /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Copy global attributes */ (void)nco_att_cpy(in_id_1,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in_1,in_id_1,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); #ifdef ENABLE_MPI /* Initialize MPI task information */ if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr); #endif /* !ENABLE_MPI */ /* Define dimensions in output file */ (void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr_1); /* fxm: TODO 550 put max_dim_sz/list(var_1,var_2) into 
var_def(var_out) */ /* Define variables in output file, copy their attributes */ (void)nco_var_dfn(in_id_1,fl_out,out_id,var_out,xtr_nbr_1,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ /* Manager obtains output filename and broadcasts to workers */ if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp); MPI_Bcast(&fl_nm_lng,1,MPI_INT,rnk_mgr,MPI_COMM_WORLD); if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char)); MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,rnk_mgr,MPI_COMM_WORLD); if(prc_rnk == rnk_mgr){ /* MPI manager code */ TKN_WRT_FREE=False; #endif /* !ENABLE_MPI */ /* Copy variable data for non-processed variables */ (void)nco_msa_var_val_cpy(in_id_1,out_id,var_fix_1,nbr_var_fix_1,lmt_all_lst,nbr_dmn_fl_1); #ifdef ENABLE_MPI /* Close output file so workers can open it */ nco_close(out_id); TKN_WRT_FREE=True; } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* ncbo() code has been similar to nces() (and ncra()) wherever possible Major differences occur where performance would otherwise suffer From now on, however, binary-file and binary-operation nature of ncbo() is too different from nces() paradigm to justify following nces() style. 
Instead, we adopt symmetric nomenclature (e.g., file_1, file_2), and perform differences variable-by-variable so peak memory usage goes as Order(2*maximum variable size) rather than Order(3*maximum record size) or Order(3*file size) */ /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* Default operation depends on invocation name */ if(nco_op_typ_sng == NULL) nco_op_typ=nco_op_typ_get(nco_op_typ_sng); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ /* Compensate for incrementing on each worker's first message */ var_wrt_nbr=-prc_nbr+1; idx=0; /* While variables remain to be processed or written... */ while(var_wrt_nbr < nbr_var_prc_1){ /* Receive message from any worker */ MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt); /* Obtain MPI message tag type */ msg_tag_typ=mpi_stt.MPI_TAG; /* Get sender's prc_rnk */ rnk_wrk=wrk_id_bfr[0]; /* Allocate next variable, if any, to worker */ if(msg_tag_typ == msg_tag_wrk_rqs){ var_wrt_nbr++; /* [nbr] Number of variables written */ /* Worker closed output file before sending msg_tag_wrk_rqs */ TKN_WRT_FREE=True; if(idx < nbr_var_prc_1){ /* Tell requesting worker to allocate space for next variable */ msg_bfr[0]=idx; /* [idx] Variable to be processed */ msg_bfr[1]=out_id; /* Output file ID */ msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */ /* Point to next variable on list */ idx++; }else{ msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */ msg_bfr[1]=out_id; /* Output file ID */ } /* endif idx */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD); /* msg_tag_typ != msg_tag_wrk_rqs */ }else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){ /* Allocate token if free, else ask worker to try later */ if(TKN_WRT_FREE){ TKN_WRT_FREE=False; 
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */ }else{ msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */ } /* !TKN_WRT_FREE */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* msg_tag_typ != msg_tag_tkn_wrt_rqs */ } /* end while var_wrt_nbr < nbr_var_prc_1 */ }else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */ wrk_id_bfr[0]=prc_rnk; while(1){ /* While work remains... */ /* Send msg_tag_wrk_rqs */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD); /* Receive msg_tag_wrk_rsp */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt); idx=msg_bfr[0]; out_id=msg_bfr[1]; if(idx == idx_all_wrk_ass) break; else{ var_prc_out[idx]->id=msg_bfr[2]; /* Process this variable same as UP code */ #else /* !ENABLE_MPI */ #ifdef _OPENMP /* OpenMP notes: shared(): msk and wgt are not altered within loop private(): wgt_avg does not need initialization */ #pragma omp parallel for default(none) firstprivate(ddra_info) private(idx,in_id_1,in_id_2,dmn_idx,dmn_jdx) shared(nco_dbg_lvl,dim_1,fl_in_1,fl_in_2,fl_out,flg_ddra,in_id_1_arr,in_id_2_arr,nbr_dmn_xtr_1,nbr_var_prc_1,nbr_var_prc_2,nco_op_typ,out_id,nco_prg_nm,rcd,var_prc_1,var_prc_2,var_prc_out,lmt_all_lst,nbr_dmn_fl_1) #endif /* !_OPENMP */ /* UP and SMP codes main loop over variables */ for(idx=0;idx<nbr_var_prc_1;idx++){ #endif /* ENABLE_MPI */ /* Common code for UP, SMP, and MPI */ int has_mss_val=False; ptr_unn mss_val; if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"%s, ",var_prc_1[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); in_id_1=in_id_1_arr[omp_get_thread_num()]; in_id_2=in_id_2_arr[omp_get_thread_num()]; (void)nco_var_mtd_refresh(in_id_1,var_prc_1[idx]); has_mss_val=var_prc_1[idx]->has_mss_val; (void)nco_msa_var_get(in_id_1,var_prc_1[idx],lmt_all_lst,nbr_dmn_fl_1); /* Find and set variable dmn_nbr, ID, mss_val, type in second file 
*/ (void)nco_var_mtd_refresh(in_id_2,var_prc_2[idx]); /* Read hyperslab from second file */ (void)nco_msa_var_get(in_id_2,var_prc_2[idx],lmt_all_lst,nbr_dmn_fl_1); /* Check that all dims in var_prc_2 are in var_prc_1 */ for(dmn_idx=0;dmn_idx<var_prc_2[idx]->nbr_dim;dmn_idx++){ for(dmn_jdx=0;dmn_jdx<var_prc_1[idx]->nbr_dim;dmn_jdx++) if(!strcmp(var_prc_2[idx]->dim[dmn_idx]->nm,var_prc_1[idx]->dim[dmn_jdx]->nm)) break; if(dmn_jdx==var_prc_1[idx]->nbr_dim){ (void)fprintf(fp_stdout,"%s: ERROR Variables do not conform:\nFile %s variable %s has dimension %s not present in file %s variable %s\n",nco_prg_nm,fl_in_2,var_prc_2[idx]->nm, var_prc_2[idx]->dim[dmn_idx]->nm,fl_in_1,var_prc_1[idx]->nm); nco_exit(EXIT_FAILURE); } /* endif error */ } /* end loop over idx */ /* Die gracefully on unsupported features... */ if(var_prc_1[idx]->nbr_dim < var_prc_2[idx]->nbr_dim){ (void)fprintf(fp_stdout,"%s: ERROR Variable %s has lesser rank in first file than in second file (%d < %d). This feature is NCO TODO 552.\n",nco_prg_nm,var_prc_1[idx]->nm,var_prc_1[idx]->nbr_dim,var_prc_2[idx]->nbr_dim); nco_exit(EXIT_FAILURE); } /* endif */ if(var_prc_1[idx]->nbr_dim > var_prc_2[idx]->nbr_dim) (void)ncap_var_cnf_dmn(&var_prc_out[idx],&var_prc_2[idx]); /* var2 now conforms in size to var1, and is in memory */ /* fxm: TODO 268 allow var1 or var2 to typecast */ /* Make sure var2 conforms to type of var1 */ if(var_prc_1[idx]->type != var_prc_2[idx]->type){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stderr,"%s: INFO Input variables do not conform in type:\nFile 1 = %s variable %s has type %s\nFile 2 = %s variable %s has type %s\nFile 3 = %s variable %s will have type %s\n",nco_prg_nm,fl_in_1,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type),fl_in_2,var_prc_2[idx]->nm,nco_typ_sng(var_prc_2[idx]->type),fl_out,var_prc_1[idx]->nm,nco_typ_sng(var_prc_1[idx]->type)); } /* endif different type */ var_prc_2[idx]=nco_var_cnf_typ(var_prc_1[idx]->type,var_prc_2[idx]); /* Change missing_value of 
var_prc_2, if any, to missing_value of var_prc_1, if any */ has_mss_val=nco_mss_val_cnf(var_prc_1[idx],var_prc_2[idx]); /* mss_val in fl_1, if any, overrides mss_val in fl_2 */ if(has_mss_val) mss_val=var_prc_1[idx]->mss_val; /* Perform specified binary operation */ switch(nco_op_typ){ case nco_op_add: /* [enm] Add file_1 to file_2 */ (void)nco_var_add(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_mlt: /* [enm] Multiply file_1 by file_2 */ (void)nco_var_mlt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_dvd: /* [enm] Divide file_1 by file_2 */ (void)nco_var_dvd(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; case nco_op_sbt: /* [enm] Subtract file_2 from file_1 */ (void)nco_var_sbt(var_prc_1[idx]->type,var_prc_1[idx]->sz,has_mss_val,mss_val,var_prc_2[idx]->val,var_prc_1[idx]->val); break; default: /* Other defined nco_op_typ values are valid for ncra(), ncrcat(), ncwa(), not ncbo() */ (void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in binary operation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; } /* end case */ var_prc_2[idx]->val.vp=nco_free(var_prc_2[idx]->val.vp); #ifdef ENABLE_MPI /* Obtain token and prepare to write */ while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD); MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt); tkn_wrt_rsp=msg_bfr[0]; /* Wait then re-send request */ if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break; } /* end while loop waiting for write token */ /* Worker has token---prepare to write */ if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){ if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE; 
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl_1,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); #else /* !ENABLE_MPI */ #ifdef _OPENMP #pragma omp critical #endif /* !_OPENMP */ #endif /* !ENABLE_MPI */ /* Common code for UP, SMP, and MPI */ { /* begin OpenMP critical */ /* Copy result to output file and free workspace buffer */ if(var_prc_1[idx]->nbr_dim == 0){ (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type); }else{ /* end if variable is scalar */ (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_1[idx]->val.vp,var_prc_1[idx]->type); } /* end else */ } /* end OpenMP critical */ var_prc_1[idx]->val.vp=nco_free(var_prc_1[idx]->val.vp); if(flg_ddra){ /* DDRA diagnostics Usage: ncbo -O -C --mdl -p ~/nco/data in.nc in.nc ~/foo.nc ncbo -O -C --mdl -p ${DATA}/nco_bm stl_5km.nc stl_5km.nc ~/foo.nc ncbo -O -C --mdl -p ${DATA}/nco_bm gcm_T85.nc gcm_T85.nc ~/foo.nc */ /* Assign remaining input for DDRA diagnostics */ ddra_info.lmn_nbr=var_prc_1[idx]->sz; /* [nbr] Variable size */ ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */ ddra_info.rnk_var=var_prc_1[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */ ddra_info.var_idx=idx; /* [enm] Index */ ddra_info.wrd_sz=nco_typ_lng(var_prc_1[idx]->type); /* [B] Bytes per element */ /* DDRA diagnostics */ rcd+=nco_ddra /* [fnc] Count operations */ (var_prc_1[idx]->nm, /* I [sng] Variable name */ (char *)NULL, /* I [sng] Weight name */ &ddra_info); /* I [sct] DDRA information */ } /* !flg_ddra */ #ifdef ENABLE_MPI /* Close output file and increment written counter */ nco_close(out_id); var_wrt_nbr++; } /* endif tkn_wrt_rqs_xcp */ } 
/* end else !idx_all_wrk_ass */ } /* end while loop requesting work/token */ } /* endif Worker */ #else /* !ENABLE_MPI */ } /* end (OpenMP parallel for) loop over idx */ #endif /* !ENABLE_MPI */ if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Close input netCDF files */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_1_arr[thr_idx]); for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_2_arr[thr_idx]); #ifdef ENABLE_MPI /* Manager moves output file (closed by workers) from temporary to permanent location */ if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out); #else /* !ENABLE_MPI */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); #endif /* end !ENABLE_MPI */ /* Remove local copy of file */ if(FILE_1_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_1); if(FILE_2_RETRIEVED_FROM_REMOTE_LOCATION && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in_2); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncbo-specific memory */ if(fl_in_1) fl_in_1=(char *)nco_free(fl_in_1); if(fl_in_2) fl_in_2=(char *)nco_free(fl_in_2); /* NCO-generic clean-up */ /* Free individual strings/arrays */ for(idx=0;idx<nbr_dmn_fl_1;idx++) for(jdx=0;jdx<lmt_all_lst[idx]->lmt_dmn_nbr;jdx++) lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]); if(nbr_dmn_fl_1 > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl_1); lmt=(lmt_sct**)nco_free(lmt); if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_1_arr) in_id_1_arr=(int *)nco_free(in_id_1_arr); if(in_id_2_arr) in_id_2_arr=(int *)nco_free(in_id_2_arr); /* Free lists of 
strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr_1 > 0) dim_1=nco_dmn_lst_free(dim_1,nbr_dmn_xtr_1); if(nbr_dmn_xtr_2 > 0) dim_2=nco_dmn_lst_free(dim_2,nbr_dmn_xtr_2); if(nbr_dmn_xtr_1 > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr_1); /* Free variable lists Using nco_var_lst_free() to free main var_1 and var_2 lists would fail if ncap_var_prc_dmn() had to broadcast any variables because pointer var_1 and var_2 still contain dangling pointer to old variable. Hence, use nco_var_lst_free() to free prc and fix lists and use nco_free() to free main var_1 and var_2 lists. 
Dangling pointers in var_1 and var_2 are unsafe: fxm TODO 578 */ if(nbr_var_prc_1 > 0) var_prc_1=nco_var_lst_free(var_prc_1,nbr_var_prc_1); if(nbr_var_fix_1 > 0) var_fix_1=nco_var_lst_free(var_fix_1,nbr_var_fix_1); if(nbr_var_prc_2 > 0) var_prc_2=nco_var_lst_free(var_prc_2,nbr_var_prc_2); if(nbr_var_fix_2 > 0) var_fix_2=nco_var_lst_free(var_fix_2,nbr_var_fix_2); var_1=(var_sct **)nco_free(var_1); var_2=(var_sct **)nco_free(var_2); if(xtr_nbr_1 > 0) var_out=nco_var_lst_free(var_out,xtr_nbr_1); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix_out=(var_sct **)nco_free(var_fix_out); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
GB_unop__log10_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log10_fc64_fc64)
// op(A') function:  GB (_unop_tran__log10_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_clog10 (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog10 (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_clog10 (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = GB_clog10 (aij) to every entry of A, writing into Cx.
// Two code paths: Ab == NULL means A is full/sparse with anz entries laid out
// contiguously in Ax; otherwise A is bitmap and Ab[p] selects live entries.
// Returns GrB_NO_VALUE when this operator/type pair is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.

GrB_Info GB (_unop_apply__log10_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    { 
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog10 (z) ;
        }
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog10 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.

GrB_Info GB (_unop_tran__log10_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
1140.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
LevMarFitting.h
#ifndef LEVMARFITTING_HEADER
#define LEVMARFITTING_HEADER
#include <algorithm>
#include <iostream>
#include "LevMarFunc.h"
#ifdef DOPARALLEL
#include <omp.h>
#endif

// Levenberg-Marquardt least-squares fitting with Cholesky-based solves.
// The Cholesky routines follow the Numerical Recipes choldc/cholsl shape.
// NOTE: the descending loops below rely on size_t wrap-around: "k != -1"
// compares against (size_t)-1 == SIZE_MAX, terminating once k wraps past 0.

template< class ScalarT >
bool Cholesky(ScalarT *a, size_t n, ScalarT p[])
/*Given a positive-definite symmetric matrix a[1..n][1..n], this routine
constructs its Cholesky decomposition, A = L * L^T. On input, only the upper
triangle of a need be given; it is not modified. The Cholesky factor L is
returned in the lower triangle of a, except for its diagonal elements which
are returned in p[1..n].  Returns false if a is not positive definite. */
{
	size_t i, j, k;
	ScalarT sum;
	for(i = 0; i < n; ++i)
	{
		for(j = i; j < n; ++j)
		{
			for(sum = a[i * n + j], k = i - 1; k != -1; --k)
				sum -= a[i * n + k] * a[j * n + k];
			if(i == j)
			{
				if(sum <= ScalarT(0))
					// a, with rounding errors, is not positive definite.
					return false;
				p[i] = std::sqrt(sum);
			}
			else
				a[j * n + i]= sum / p[i];
		}
	}
	return true;
}

// Compile-time-sized variant: identical algorithm for an N x N matrix,
// letting the compiler fully unroll/optimize the fixed loop bounds.
template< class ScalarT, unsigned int N >
bool Cholesky(ScalarT *a, ScalarT p[])
/*Given a positive-definite symmetric matrix a[1..n][1..n], this routine
constructs its Cholesky decomposition, A = L * L^T. On input, only the upper
triangle of a need be given; it is not modified. The Cholesky factor L is
returned in the lower triangle of a, except for its diagonal elements which
are returned in p[1..n].*/
{
	size_t i, j, k;
	ScalarT sum;
	for(i = 0; i < N; ++i)
	{
		for(j = i; j < N; ++j)
		{
			for(sum = a[i * N + j], k = i - 1; k != -1; --k)
				sum -= a[i * N + k] * a[j * N + k];
			if(i == j)
			{
				if(sum <= ScalarT(0))
					// a, with rounding errors, is not positive definite.
					return false;
				p[i] = std::sqrt(sum);
			}
			else
				a[j * N + i]= sum / p[i];
		}
	}
	return true;
}

template< class ScalarT >
void CholeskySolve(ScalarT *a, size_t n, ScalarT p[], ScalarT b[], ScalarT x[])
/*Solves the set of n linear equations A * x = b, where a is a
positive-definite symmetric matrix. a[1..n][1..n] and p[1..n] are input as the
output of the routine choldc. Only the lower subdiagonal portion of a is
accessed. b[1..n] is input as the right-hand side vector. The solution vector
is returned in x[1..n]. a, n, and p are not modified and can be left in place
for successive calls with different right-hand sides b. b is not modified
unless you identify b and x in the calling sequence, which is allowed.*/
{
	size_t i, k;
	ScalarT sum;
	for(i = 0; i < n; i++)
	{	// Solve L * y = b, storing y in x (forward substitution).
		for(sum = b[i], k = i-1; k != -1; --k)
			sum -= a[i * n + k] * x[k];
		x[i] = sum / p[i];
	}
	for(i = n - 1; i != -1; --i)
	{	// Solve L^T * x = y (back substitution).
		for(sum = x[i], k = i + 1; k < n; ++k)
			sum -= a[k * n + i] * x[k];
		x[i]= sum / p[i];
	}
}

// Compile-time-sized variant of CholeskySolve for an N x N system.
template< class ScalarT, unsigned int N >
void CholeskySolve(ScalarT *a, ScalarT p[], ScalarT b[], ScalarT x[])
/*Solves the set of n linear equations A * x = b, where a is a
positive-definite symmetric matrix. a[1..n][1..n] and p[1..n] are input as the
output of the routine choldc. Only the lower subdiagonal portion of a is
accessed. b[1..n] is input as the right-hand side vector. The solution vector
is returned in x[1..n]. a, n, and p are not modified and can be left in place
for successive calls with different right-hand sides b. b is not modified
unless you identify b and x in the calling sequence, which is allowed.*/
{
	size_t i, k;
	ScalarT sum;
	for(i = 0; i < N; i++)
	{	// Solve L * y = b, storing y in x (forward substitution).
		for(sum = b[i], k = i-1; k != -1; --k)
			sum -= a[i * N + k] * x[k];
		x[i] = sum / p[i];
	}
	for(i = N - 1; i != -1; --i)
	{	// Solve L^T * x = y (back substitution).
		for(sum = x[i], k = i + 1; k < N; ++k)
			sum -= a[k * N + i] * x[k];
		x[i]= sum / p[i];
	}
}

// Levenberg-Marquardt over a point range [begin, end).
// FuncT supplies ScalarType, NumParams, Chi(), Derivatives(), Normalize().
// param (length FuncT::NumParams) is the initial guess on input and the
// fitted parameters on output.  Returns true if at least one iteration
// reduced the error.  The damping/acceptance schedule follows Madsen,
// Nielsen, Tingleff, "Methods for non-linear least squares problems".
// Unless PRECISIONLEVMAR is defined, the data is processed in growing
// subsets (roughly doubling) to cheapen the early iterations.
template< class IteratorT, class FuncT >
bool LevMar(IteratorT begin, IteratorT end, FuncT &func,
	typename FuncT::ScalarType *param)
{
	typedef typename FuncT::ScalarType ScalarType;
	enum { paramDim = FuncT::NumParams };
	bool retVal = true;
	unsigned int totalSize = end - begin;
	if(!totalSize)
		return false;
	ScalarType lambda = ScalarType(0.0001);
	// workspaces: F0 is the (size x paramDim) Jacobian, U = F0^T F0,
	// H = U + lambda*I, v = -F0^T d, d the residuals, temp scratch for func
	ScalarType *F0 = new ScalarType[totalSize * paramDim];
	ScalarType *U = new ScalarType[paramDim * paramDim];
	ScalarType *H = new ScalarType[paramDim * paramDim];
	ScalarType *v = new ScalarType[paramDim];
	ScalarType *d = new ScalarType[totalSize];
	ScalarType *temp = new ScalarType[totalSize];
	ScalarType *x = new ScalarType[paramDim];
	ScalarType *p = new ScalarType[paramDim];
	ScalarType *paramNew = new ScalarType[paramDim];
	size_t nu = 2;
	func.Normalize(param);
	ScalarType paramNorm = 0;
	// do fitting in different steps
	unsigned int subsets = std::max(int(std::floor(
		std::log((float)totalSize)/std::log(2.f)))-8, 1);
#ifdef PRECISIONLEVMAR
	subsets = 1;
#endif
	// subsetSizes[i] is the extra number of points added at stage i;
	// each earlier stage uses half the remaining data
	MiscLib::Vector< unsigned int > subsetSizes(subsets);
	for(unsigned int i = subsetSizes.size(); i;)
	{
		--i;
		subsetSizes[i] = totalSize;
		if(i)
			subsetSizes[i] = subsetSizes[i] >> 1;
		totalSize -= subsetSizes[i];
	}
	unsigned int curSubset = 0;
	unsigned int size = 0;
	// get current error
	ScalarType chi = 0, newChi = 0;
	ScalarType rho = 1; // gain ratio: actual vs. predicted error decrease
	unsigned int outerIter = 0,
#ifndef PRECISIONLEVMAR
		maxOuterIter = 200 / subsetSizes.size(),
#else
		maxOuterIter = 500,
#endif
		usefulIter = 0, totalIter = 0;;
	do
	{
		// get current error on the enlarged subset
		size += subsetSizes[curSubset];
		newChi = func.Chi(param, begin, begin + size, d, temp);
		for(unsigned int i = 0; i < paramDim; ++i)
			paramNew[i] = param[i];
		outerIter = 0;
		if(rho < 0)
			rho = 1;
		do
		{
			++outerIter;
			++totalIter;
			if(rho > 0)
			{
				// previous step was accepted: commit it and rebuild U, v
				nu = 2;
				chi = newChi;
				for(size_t i = 0; i < paramDim; ++i)
					param[i] = paramNew[i];
#ifndef PRECISIONLEVMAR
				if(std::sqrt(chi / size) < ScalarType(1e-5))
					// chi very small? -> will be hard to improve
				{
					//std::cout << "LevMar converged because of small chi" << std::endl;
					break;
				}
#endif
				paramNorm = 0;
				for(size_t i = 0; i < paramDim; ++i)
					paramNorm += param[i] * param[i];
				paramNorm = std::sqrt(paramNorm);
				// construct the needed matrices
				// F0 is the matrix constructed from param
				// F0 has gradient_i(param) as its ith row
				func.Derivatives(param, begin, begin + size, d, temp, F0);
				// U = F0_t * F0
				// v = F0_t * d(param) (d(param) = [d_i(param)])
#ifdef DOPARALLEL
#pragma omp parallel for
#endif
				for(int i = 0; i < paramDim; ++i)
				{
					for(size_t j = i; j < paramDim; ++j)
						// j = i since only upper triangle is needed
					{
						U[i * paramDim + j] = 0;
						for(size_t k = 0; k < size; ++k)
						{
							U[i * paramDim + j] +=
								F0[k * paramDim + i] * F0[k * paramDim + j];
						}
					}
				}
				ScalarType vmag = 0; // magnitude of v
#ifdef DOPARALLEL
#pragma omp parallel for
#endif
				for(int i = 0; i < paramDim; ++i)
				{
					v[i] = 0;
					for(size_t k = 0; k < size; ++k)
						v[i] += F0[k * paramDim + i] * d[k];
					v[i] *= -1;
#ifndef DOPARALLEL
					vmag = std::max((ScalarType)fabs(v[i]), vmag);
#endif
				}
#ifdef DOPARALLEL
				// reduce vmag serially; the parallel loop above cannot
				// safely accumulate it
				for(unsigned int i = 0; i < paramDim; ++i)
					vmag = std::max(fabs(v[i]), vmag);
#endif
				// and check for convergence with magnitude of v
#ifndef PRECISIONLEVMAR
				if(vmag < ScalarType(1.0e-6))
#else
				if(vmag < ScalarType(1e-8))
#endif
				{
					//std::cout << "LevMar converged with small gradient" << std::endl;
					//retVal = chi < initialChi;
					//goto cleanup;
					break;
				}
				if(outerIter == 1)
				{
					// compute magnitue of F0 to seed the damping factor
					ScalarType fmag = fabs(F0[0]);
					for(size_t i = 1; i < paramDim * size; ++i)
						if(fmag < fabs(F0[i]))
							fmag = fabs(F0[i]);
					lambda = 1e-3f * fmag;
				}
				else
					// Madsen/Nielsen/Tingleff damping update from gain ratio
					lambda *= std::max( ScalarType(0.3),
						1 - ScalarType(std::pow(2 * rho - 1, 3)) );
			}
			memcpy(H, U, sizeof(ScalarType) * paramDim * paramDim);
			for(size_t i = 0; i < paramDim; ++i)
				H[i * paramDim + i] += lambda;
				// * (ScalarType(1) + H[i * paramDim + i]);
			// now H is positive definite and symmetric
			// solve Hx = -v with Cholesky
			ScalarType xNorm = 0, L = 0;
			if(!Cholesky< ScalarType, paramDim >(H, p))
				goto increment;
			CholeskySolve< ScalarType, paramDim >(H, p, v, x);
			// magnitude of x small? If yes we are done
			for(size_t i = 0; i < paramDim; ++i)
				xNorm += x[i] * x[i];
			xNorm = std::sqrt(xNorm);
#ifndef PRECISIONLEVMAR
			if(xNorm <= ScalarType(1.0e-6) * (paramNorm + ScalarType(1.0e-6)))
#else
			if(xNorm <= ScalarType(1e-8) * (paramNorm + ScalarType(1e-8)))
#endif
			{
				//std::cout << "LevMar converged with small step" << std::endl;
				//goto cleanup;
				break;
			}
			for(size_t i = 0; i < paramDim; ++i)
				paramNew[i] = param[i] + x[i];
			func.Normalize(paramNew);
			// get new error
			newChi = func.Chi(paramNew, begin, begin + size, d, temp);
			// the following test is taken from
			// "Methods for non-linear least squares problems"
			// by Madsen, Nielsen, Tingleff
			L = 0;
			for(size_t i = 0; i < paramDim; ++i)
				L += .5f * x[i] * (lambda * x[i] + v[i]);
			rho = (chi - newChi) / L;
			if(rho > 0)
			{
				// step accepted
				++usefulIter;
#ifndef PRECISIONLEVMAR
				if((chi - newChi) < 1e-4 * chi)
				{
					//std::cout << "LevMar converged with small chi difference" << std::endl;
					chi = newChi;
					for(size_t i = 0; i < paramDim; ++i)
						param[i] = paramNew[i];
					break;
				}
#endif
				continue;
			}
increment:
			// step rejected (or H not positive definite): increase damping
			rho = -1;
			// increment lambda
			lambda = nu * lambda;
			size_t nu2 = nu << 1;
			if(nu2 < nu) // guard against overflow of nu
				nu2 = 2;
			nu = nu2;
		}
		while(outerIter < maxOuterIter);
		++curSubset;
	}
	while(curSubset < subsetSizes.size());
	retVal = usefulIter > 0;
	delete[] F0;
	delete[] U;
	delete[] H ;
	delete[] v;
	delete[] d;
	delete[] temp;
	delete[] x;
	delete[] p;
	delete[] paramNew;
	return retVal;
}

// Levenberg-Marquardt over an array of residual functors.
// funcs[i] evaluates residual d_i(param) via operator(), and its gradient
// via the two-argument operator().  Returns the final chi-squared on
// convergence, or -1 if no convergent step was found.
// NOTE(review): lambda is declared float rather than ScalarT here — works,
// but loses precision when ScalarT is double; confirm intent before changing.
template< class ScalarT >
ScalarT LevMar(unsigned int paramDim, unsigned int imgDim,
	const LevMarFunc< ScalarT > **funcs, ScalarT *param)
{
	ScalarT retVal = -1;
	size_t size = imgDim;
	float lambda = ScalarT(0.0001);
	ScalarT *F0 = new ScalarT[size * paramDim];
	ScalarT *U = new ScalarT[paramDim * paramDim];
	ScalarT *H = new ScalarT[paramDim * paramDim];
	ScalarT *v = new ScalarT[paramDim];
	ScalarT *d = new ScalarT[size];
	ScalarT *dNew = new ScalarT[size];
	ScalarT *x = new ScalarT[paramDim];
	ScalarT *p = new ScalarT[paramDim];
	ScalarT *paramNew = new ScalarT[paramDim];
	size_t outerIter = 0, maxOuterIter = 10;
	// get current error
	ScalarT chi = 0, newChi;
	for(size_t i = 0; i < size; ++i)
	{
		d[i] = (*(funcs[i]))(param);
		chi += d[i] * d[i];
	}
	do
	{
		++outerIter;
		lambda *= ScalarT(0.04);
		// construct the needed matrices
		// F0 is the matrix constructed from param
		// F0 has gradient_i(param) as its ith row
		for(size_t i = 0; i < size; ++i)
			(*(funcs[i]))(param, &F0[i * paramDim]);
		// U = F0_t * F0
		for(size_t i = 0; i < paramDim; ++i)
			for(size_t j = i; j < paramDim; ++j)
				// j = i since only upper triangle is needed
			{
				U[i * paramDim + j] = 0;
				for(size_t k = 0; k < size; ++k)
					U[i * paramDim + j] +=
						F0[k * paramDim + i] * F0[k * paramDim + j];
			}
		// v = F_t * d(param) (d(param) = [d_i(param)])
		for(size_t i = 0; i < paramDim; ++i)
		{
			v[i] = 0;
			for(size_t k = 0; k < size; ++k)
				v[i] += F0[k * paramDim + i] * d[k];
			v[i] *= -1;
		}
		size_t iter = 0, maxIter = 10;
		do
		{
			++iter;
			// increment lambda
			lambda = 10 * lambda;
			memcpy(H, U, sizeof(ScalarT) * paramDim * paramDim);
			for(size_t i = 0; i < paramDim; ++i)
				H[i * paramDim + i] += lambda * (ScalarT(1) + H[i * paramDim + i]);
			// now H is positive definite and symmetric
			// solve Hx = -v with Cholesky
			if(!Cholesky(H, paramDim, p))
				goto cleanup;
			CholeskySolve(H, paramDim, p, v, x);
			for(size_t i = 0; i < paramDim; ++i)
				paramNew[i] = param[i] + x[i];
			// get new error
			newChi = 0;
			for(size_t i = 0; i < size; ++i)
			{
				dNew[i] = (*(funcs[i]))(paramNew);
				newChi += dNew[i] * dNew[i];
			}
			// check for convergence
			/*float cvgTest = 0;
			for(size_t i = 0; i < paramDim; ++i)
			{
				//float c = param[i] - paramNew[i];
				cvgTest += x[i] * x[i];
			}
			if(std::sqrt(cvgTest) < 1.0e-6)
			{
				for(size_t i = 0; i < paramDim; ++i)
					param[i] = paramNew[i];
				goto cleanup;
			}*/
			if(/*newChi <= chi &&*/ fabs(chi - newChi) / chi < ScalarT(1e-4))
			{
				// relative improvement below threshold: accept and stop
				for(size_t i = 0; i < paramDim; ++i)
					param[i] = paramNew[i];
				retVal = newChi;
				goto cleanup;
			}
		}
		while(newChi > chi && iter < maxIter);
		if(newChi < chi)
		{
			// improved: commit the step and swap residual buffers
			chi = newChi;
			for(size_t i = 0; i < paramDim; ++i)
				param[i] = paramNew[i];
			std::swap(d, dNew);
		}
	}
	while(outerIter < maxOuterIter);
cleanup:
	delete[] F0;
	delete[] U;
	delete[] H ;
	delete[] v;
	delete[] d;
	delete[] dNew;
	delete[] x;
	delete[] p;
	delete[] paramNew;
	return retVal;
}
#endif
GB_binop__bxor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int32) // C=scalar+B GB (_bind1st__bxor_int32) // C=scalar+B' GB (_bind1st_tran__bxor_int32) // C=A+scalar GB (_bind2nd__bxor_int32) // C=A'+scalar GB (_bind2nd_tran__bxor_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT32 || GxB_NO_BXOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// This kernel is excluded for BXOR: the dense-accum ewise3 variant only
// exists for the operators listed above, hence the enclosing "#if 0".
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The body is expanded from the template, configured by the macros above.
void GB (_Cdense_ewise3_noaccum__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Excluded for BXOR (no colscale semiring variant is generated).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Excluded for BXOR (no rowscale semiring variant is generated).
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true (GxB_eWiseUnion)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for BXOR (it is commutative), so only the
    // #else branch below is compiled.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is always true for
        // full matrices, where Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                      \
{                                              \
    int32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) ^ (aij) ;                    \
}

GrB_Info GB (_bind1st_tran__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition for any later use
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                      \
{                                              \
    int32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) ^ (y) ;                    \
}

GrB_Info GB (_bind2nd_tran__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tti-so8.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"

// Auto-generated (Devito-style) data wrapper: raw array plus per-dimension
// size/padding/halo metadata.
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

// Wall-clock accumulators (seconds) for the four timed sections below.
struct profiler
{
  double section0;
  double section1;
  double section2;
  double section3;
} ;

// Blocked stencil kernel (time update of u and v); defined after ForwardTTI.
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);

// TTI forward propagator. Per time step: section1 runs the bf0 stencil kernel
// over the blocked x/y tiles (plus remainder tiles), section2 injects the
// source wavelet into u and v via trilinear weights, and section3 samples
// (u + v) at the receiver positions into rec. section0 precomputes the
// angle-dependent fields r49..r53 once before time stepping.
int ForwardTTI(struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict phi_vec, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x0_blk0_size, const int x_M, const int x_m, const int x_size, const int y0_blk0_size, const int y_M, const int y_m, const int y_size, const int z_M, const int z_m, const int z_size, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers, const int nthreads, const int nthreads_nonaffine)
{
  // Reinterpret the flat dataobj buffers as multi-dimensional arrays.
  // The +8 offsets used when indexing below skip the halo/padding region
  // (presumably a space-order-8 discretization — confirm against generator).
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[delta_vec->size[1]][delta_vec->size[2]]) delta_vec->data;
  float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data;
  float (*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[phi_vec->size[1]][phi_vec->size[2]]) phi_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[theta_vec->size[1]][theta_vec->size[2]]) theta_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  // Scratch fields (domain + 2-point halo each side) for the precomputed
  // trigonometric terms used by bf0.
  // NOTE(review): posix_memalign return values are unchecked; on allocation
  // failure the pointers are used uninitialized (generated code).
  float (*r53)[y_size + 2 + 2][z_size + 2 + 2];
  posix_memalign((void**)&r53, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
  float (*r52)[y_size + 2 + 2][z_size + 2 + 2];
  posix_memalign((void**)&r52, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
  float (*r51)[y_size + 2 + 2][z_size + 2 + 2];
  posix_memalign((void**)&r51, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
  float (*r50)[y_size + 2 + 2][z_size + 2 + 2];
  posix_memalign((void**)&r50, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));
  float (*r49)[y_size + 2 + 2][z_size + 2 + 2];
  posix_memalign((void**)&r49, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2]));

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */
  // Precompute time-invariant angle terms:
  //   r53 = sin(phi), r52 = sin(theta), r51 = cos(phi), r50 = cos(theta),
  //   r49 = sqrt(2*delta + 1)
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int x = x_m - 2; x <= x_M + 2; x += 1)
    {
      for (int y = y_m - 2; y <= y_M + 2; y += 1)
      {
        #pragma omp simd aligned(delta,phi,theta:32)
        for (int z = z_m - 2; z <= z_M + 2; z += 1)
        {
          r53[x + 2][y + 2][z + 2] = sin(phi[x + 8][y + 8][z + 8]);
          r52[x + 2][y + 2][z + 2] = sin(theta[x + 8][y + 8][z + 8]);
          r51[x + 2][y + 2][z + 2] = cos(phi[x + 8][y + 8][z + 8]);
          r50[x + 2][y + 2][z + 2] = cos(theta[x + 8][y + 8][z + 8]);
          r49[x + 2][y + 2][z + 2] = sqrt(2*delta[x + 8][y + 8][z + 8] + 1);
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  // Time loop with a rotating 3-buffer scheme: t0 = current, t1 = next,
  // t2 = previous (indices mod 3).
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1 */
    // Four bf0 invocations cover: full x/y tiles, the y remainder strip,
    // the x remainder strip, and the x/y remainder corner.
    bf0(damp_vec,dt,epsilon_vec,(float *)r49,(float *)r50,(float *)r51,(float *)r52,(float *)r53,u_vec,v_vec,vp_vec,x0_blk0_size,x_size,y0_blk0_size,y_size,z_size,t0,t1,t2,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,epsilon_vec,(float *)r49,(float *)r50,(float *)r51,(float *)r52,(float *)r53,u_vec,v_vec,vp_vec,x0_blk0_size,x_size,(y_M - y_m + 1)%(y0_blk0_size),y_size,z_size,t0,t1,t2,x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    bf0(damp_vec,dt,epsilon_vec,(float *)r49,(float *)r50,(float *)r51,(float *)r52,(float *)r53,u_vec,v_vec,vp_vec,(x_M - x_m + 1)%(x0_blk0_size),x_size,y0_blk0_size,y_size,z_size,t0,t1,t2,x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,z_M,z_m,nthreads);
    bf0(damp_vec,dt,epsilon_vec,(float *)r49,(float *)r50,(float *)r51,(float *)r52,(float *)r53,u_vec,v_vec,vp_vec,(x_M - x_m + 1)%(x0_blk0_size),x_size,(y_M - y_m + 1)%(y0_blk0_size),y_size,z_size,t0,t1,t2,x_M,x_M - (x_M - x_m + 1)%(x0_blk0_size) + 1,y_M,y_M - (y_M - y_m + 1)%(y0_blk0_size) + 1,z_M,z_m,nthreads);
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;

    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2 */
    // Source injection: each source point is spread onto the 8 surrounding
    // grid nodes with trilinear weights (px,py,pz are local coordinates in
    // the cell; the 1.0e-1/1.0e-2/1.0e-3 factors encode the 10.0 grid
    // spacing). Atomic updates guard against sources sharing grid nodes.
    #pragma omp parallel num_threads(nthreads_nonaffine)
    {
      int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_src_M - p_src_m + 1)/nthreads_nonaffine));
      #pragma omp for collapse(1) schedule(dynamic,chunk_size)
      for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
      {
        int ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0]));
        int ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1]));
        int ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2]));
        int ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1;
        int ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1;
        int ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1;
        float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]);
        float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]);
        float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]);
        // Inject into u[t1] at the 8 cell corners (guards keep the writes
        // inside the padded domain).
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
        {
          float r54 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8]*vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8] += r54;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
        {
          float r55 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8]*vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8] += r55;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r56 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8]*vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8] += r56;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r57 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8]*vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8] += r57;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r58 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8]*vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8] += r58;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r59 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8]*vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8] += r59;
        }
        if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r60 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8]*vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8] += r60;
        }
        if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r61 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8]*vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8])*src[time][p_src];
          #pragma omp atomic update
          u[t1][ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8] += r61;
        }
        // Recompute the same indices/weights (generator artifact) and
        // repeat the injection into v[t1].
        ii_src_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0]));
        ii_src_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1]));
        ii_src_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2]));
        ii_src_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*src_coords[p_src][2])) + 1;
        ii_src_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*src_coords[p_src][1])) + 1;
        ii_src_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*src_coords[p_src][0])) + 1;
        px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*src_coords[p_src][0])) + src_coords[p_src][0]);
        py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*src_coords[p_src][1])) + src_coords[p_src][1]);
        pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*src_coords[p_src][2])) + src_coords[p_src][2]);
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
        {
          float r62 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8]*vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_0 + 8][ii_src_1 + 8][ii_src_2 + 8] += r62;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
        {
          float r63 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8]*vp[ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_0 + 8][ii_src_1 + 8][ii_src_3 + 8] += r63;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r64 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8]*vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_0 + 8][ii_src_4 + 8][ii_src_2 + 8] += r64;
        }
        if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
        {
          float r65 = (dt*dt)*(vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8]*vp[ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_0 + 8][ii_src_4 + 8][ii_src_3 + 8] += r65;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r66 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8]*vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_5 + 8][ii_src_1 + 8][ii_src_2 + 8] += r66;
        }
        if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r67 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8]*vp[ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_5 + 8][ii_src_1 + 8][ii_src_3 + 8] += r67;
        }
        if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r68 = (dt*dt)*(vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8]*vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_5 + 8][ii_src_4 + 8][ii_src_2 + 8] += r68;
        }
        if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
        {
          float r69 = 1.0e-3F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8]*vp[ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8])*src[time][p_src];
          #pragma omp atomic update
          v[t1][ii_src_5 + 8][ii_src_4 + 8][ii_src_3 + 8] += r69;
        }
      }
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;

    struct timeval start_section3, end_section3;
    gettimeofday(&start_section3, NULL);
    /* Begin section3 */
    // Receiver sampling: trilinear interpolation of (u + v) at time index t0
    // at each receiver location; no atomics needed (one writer per p_rec).
    #pragma omp parallel num_threads(nthreads_nonaffine)
    {
      int chunk_size = (int)(fmax(1, (1.0F/3.0F)*(p_rec_M - p_rec_m + 1)/nthreads_nonaffine));
      #pragma omp for collapse(1) schedule(dynamic,chunk_size)
      for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
      {
        int ii_rec_0 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec_coords[p_rec][0]));
        int ii_rec_1 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec_coords[p_rec][1]));
        int ii_rec_2 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec_coords[p_rec][2]));
        int ii_rec_3 = (int)(floor(-1.0e-1*o_z + 1.0e-1*rec_coords[p_rec][2])) + 1;
        int ii_rec_4 = (int)(floor(-1.0e-1*o_y + 1.0e-1*rec_coords[p_rec][1])) + 1;
        int ii_rec_5 = (int)(floor(-1.0e-1*o_x + 1.0e-1*rec_coords[p_rec][0])) + 1;
        float px = (float)(-o_x - 1.0e+1F*(int)(floor(-1.0e-1F*o_x + 1.0e-1F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
        float py = (float)(-o_y - 1.0e+1F*(int)(floor(-1.0e-1F*o_y + 1.0e-1F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
        float pz = (float)(-o_z - 1.0e+1F*(int)(floor(-1.0e-1F*o_z + 1.0e-1F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
        float sum = 0.0F;
        if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
        {
          sum += (u[t0][ii_rec_0 + 8][ii_rec_1 + 8][ii_rec_2 + 8] + v[t0][ii_rec_0 + 8][ii_rec_1 + 8][ii_rec_2 + 8])*(-1.0e-3F*px*py*pz + 1.0e-2F*px*py + 1.0e-2F*px*pz - 1.0e-1F*px + 1.0e-2F*py*pz - 1.0e-1F*py - 1.0e-1F*pz + 1);
        }
        if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
        {
          sum += (u[t0][ii_rec_0 + 8][ii_rec_1 + 8][ii_rec_3 + 8] + v[t0][ii_rec_0 + 8][ii_rec_1 + 8][ii_rec_3 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*pz - 1.0e-2F*py*pz + 1.0e-1F*pz);
        }
        if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
        {
          sum += (u[t0][ii_rec_0 + 8][ii_rec_4 + 8][ii_rec_2 + 8] + v[t0][ii_rec_0 + 8][ii_rec_4 + 8][ii_rec_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*py*pz + 1.0e-1F*py);
        }
        if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
        {
          sum += (-1.0e-3F*px*py*pz + 1.0e-2F*py*pz)*(u[t0][ii_rec_0 + 8][ii_rec_4 + 8][ii_rec_3 + 8] + v[t0][ii_rec_0 + 8][ii_rec_4 + 8][ii_rec_3 + 8]);
        }
        if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
        {
          sum += (u[t0][ii_rec_5 + 8][ii_rec_1 + 8][ii_rec_2 + 8] + v[t0][ii_rec_5 + 8][ii_rec_1 + 8][ii_rec_2 + 8])*(1.0e-3F*px*py*pz - 1.0e-2F*px*py - 1.0e-2F*px*pz + 1.0e-1F*px);
        }
        if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
        {
          sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*pz)*(u[t0][ii_rec_5 + 8][ii_rec_1 + 8][ii_rec_3 + 8] + v[t0][ii_rec_5 + 8][ii_rec_1 + 8][ii_rec_3 + 8]);
        }
        if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
        {
          sum += (-1.0e-3F*px*py*pz + 1.0e-2F*px*py)*(u[t0][ii_rec_5 + 8][ii_rec_4 + 8][ii_rec_2 + 8] + v[t0][ii_rec_5 + 8][ii_rec_4 + 8][ii_rec_2 + 8]);
        }
        if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
        {
          sum += 1.0e-3F*px*py*pz*(u[t0][ii_rec_5 + 8][ii_rec_4 + 8][ii_rec_3 + 8] + v[t0][ii_rec_5 + 8][ii_rec_4 + 8][ii_rec_3 + 8]);
        }
        rec[time][p_rec] = sum;
      }
    }
    /* End section3 */
    gettimeofday(&end_section3, NULL);
    timers->section3 += (double)(end_section3.tv_sec-start_section3.tv_sec)+(double)(end_section3.tv_usec-start_section3.tv_usec)/1000000;
  }

  free(r53);
  free(r52);
  free(r51);
  free(r50);
  free(r49);
  return 0;
}

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj
*restrict vp_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data; float (*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[epsilon_vec->size[1]][epsilon_vec->size[2]]) epsilon_vec->data; float (*restrict r49)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r49_vec; float (*restrict r50)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r50_vec; float (*restrict r51)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r51_vec; float (*restrict r52)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r52_vec; float (*restrict r53)[y_size + 2 + 2][z_size + 2 + 2] __attribute__ ((aligned (64))) = (float (*)[y_size + 2 + 2][z_size + 2 + 2]) r53_vec; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data; float (*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]]) v_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data; float r179[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))); float r178[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2] 
__attribute__((aligned(64))); if (x0_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) private(r178,r179) { #pragma omp for collapse(1) schedule(dynamic,1) for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size) { for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size) { for (int x = x0_blk0 - 2, xs = 0; x <= x0_blk0 + x0_blk0_size + 1; x += 1, xs += 1) { for (int y = y0_blk0 - 2, ys = 0; y <= y0_blk0 + y0_blk0_size + 1; y += 1, ys += 1) { #pragma omp simd aligned(u,v:32) for (int z = z_m - 2; z <= z_M + 2; z += 1) { float r187 = 8.33333346e-3F*(v[t0][x + 10][y + 8][z + 8] - v[t0][x + 10][y + 12][z + 8]) + 6.66666677e-2F*(-v[t0][x + 10][y + 9][z + 8] + v[t0][x + 10][y + 11][z + 8]); float r188 = 8.33333346e-3F*(v[t0][x + 8][y + 10][z + 8] - v[t0][x + 12][y + 10][z + 8]) + 6.66666677e-2F*(-v[t0][x + 9][y + 10][z + 8] + v[t0][x + 11][y + 10][z + 8]); float r189 = 8.33333346e-3F*(v[t0][x + 10][y + 10][z + 6] - v[t0][x + 10][y + 10][z + 10]) + 6.66666677e-2F*(-v[t0][x + 10][y + 10][z + 7] + v[t0][x + 10][y + 10][z + 9]); float r190 = 8.33333346e-3F*(u[t0][x + 10][y + 8][z + 8] - u[t0][x + 10][y + 12][z + 8]) + 6.66666677e-2F*(-u[t0][x + 10][y + 9][z + 8] + u[t0][x + 10][y + 11][z + 8]); float r191 = 8.33333346e-3F*(u[t0][x + 8][y + 10][z + 8] - u[t0][x + 12][y + 10][z + 8]) + 6.66666677e-2F*(-u[t0][x + 9][y + 10][z + 8] + u[t0][x + 11][y + 10][z + 8]); float r192 = 8.33333346e-3F*(u[t0][x + 10][y + 10][z + 6] - u[t0][x + 10][y + 10][z + 10]) + 6.66666677e-2F*(-u[t0][x + 10][y + 10][z + 7] + u[t0][x + 10][y + 10][z + 9]); r179[xs][ys][z + 2] = -(r187*r52[x + 2][y + 2][z + 2]*r53[x + 2][y + 2][z + 2] + r188*r51[x + 2][y + 2][z + 2]*r52[x + 2][y + 2][z + 2] + r189*r50[x + 2][y + 2][z + 2]); r178[xs][ys][z + 2] = -(r190*r52[x + 2][y + 2][z + 2]*r53[x + 2][y + 2][z + 2] + r191*r51[x + 2][y + 2][z + 2]*r52[x + 2][y + 2][z + 2] + r192*r50[x + 2][y + 2][z + 2]); } } } for (int x = x0_blk0, xs = 0; x <= x0_blk0 + x0_blk0_size - 1; x += 1, 
xs += 1) { for (int y = y0_blk0, ys = 0; y <= y0_blk0 + y0_blk0_size - 1; y += 1, ys += 1) { #pragma omp simd aligned(damp,epsilon,u,v,vp:32) for (int z = z_m; z <= z_M; z += 1) { float r186 = dt*dt; float r185 = dt*damp[x + 1][y + 1][z + 1]; float r184 = r179[xs + 1][ys + 2][z + 2]*r51[x + 1][y + 2][z + 2]*r52[x + 1][y + 2][z + 2] + r179[xs + 2][ys + 1][z + 2]*r52[x + 2][y + 1][z + 2]*r53[x + 2][y + 1][z + 2] + r179[xs + 2][ys + 2][z + 1]*r50[x + 2][y + 2][z + 1] - r179[xs + 2][ys + 2][z + 3]*r50[x + 2][y + 2][z + 3] - r179[xs + 2][ys + 3][z + 2]*r52[x + 2][y + 3][z + 2]*r53[x + 2][y + 3][z + 2] - r179[xs + 3][ys + 2][z + 2]*r51[x + 3][y + 2][z + 2]*r52[x + 3][y + 2][z + 2]; float r183 = -r179[xs][ys + 2][z + 2]*r51[x][y + 2][z + 2]*r52[x][y + 2][z + 2] - r179[xs + 2][ys][z + 2]*r52[x + 2][y][z + 2]*r53[x + 2][y][z + 2] - r179[xs + 2][ys + 2][z]*r50[x + 2][y + 2][z] + r179[xs + 2][ys + 2][z + 4]*r50[x + 2][y + 2][z + 4] + r179[xs + 2][ys + 4][z + 2]*r52[x + 2][y + 4][z + 2]*r53[x + 2][y + 4][z + 2] + r179[xs + 4][ys + 2][z + 2]*r51[x + 4][y + 2][z + 2]*r52[x + 4][y + 2][z + 2]; float r182 = 6.66666677e-2F*(-r178[xs + 1][ys + 2][z + 2]*r51[x + 1][y + 2][z + 2]*r52[x + 1][y + 2][z + 2] - r178[xs + 2][ys + 1][z + 2]*r52[x + 2][y + 1][z + 2]*r53[x + 2][y + 1][z + 2] - r178[xs + 2][ys + 2][z + 1]*r50[x + 2][y + 2][z + 1] + r178[xs + 2][ys + 2][z + 3]*r50[x + 2][y + 2][z + 3] + r178[xs + 2][ys + 3][z + 2]*r52[x + 2][y + 3][z + 2]*r53[x + 2][y + 3][z + 2] + r178[xs + 3][ys + 2][z + 2]*r51[x + 3][y + 2][z + 2]*r52[x + 3][y + 2][z + 2]); float r181 = 8.33333346e-3F*(r178[xs][ys + 2][z + 2]*r51[x][y + 2][z + 2]*r52[x][y + 2][z + 2] + r178[xs + 2][ys][z + 2]*r52[x + 2][y][z + 2]*r53[x + 2][y][z + 2] + r178[xs + 2][ys + 2][z]*r50[x + 2][y + 2][z] - r178[xs + 2][ys + 2][z + 4]*r50[x + 2][y + 2][z + 4] - r178[xs + 2][ys + 4][z + 2]*r52[x + 2][y + 4][z + 2]*r53[x + 2][y + 4][z + 2] - r178[xs + 4][ys + 2][z + 2]*r51[x + 4][y + 2][z + 2]*r52[x + 4][y + 2][z + 2]); float r180 = 
1.0/(vp[x + 8][y + 8][z + 8]*vp[x + 8][y + 8][z + 8]); float r151 = 2.0F*r180 + r185; float r152 = -2.0F*r180 + r185; float r153 = -1.78571425e-5F*(u[t0][x + 4][y + 8][z + 8] + u[t0][x + 8][y + 4][z + 8] + u[t0][x + 8][y + 8][z + 4] + u[t0][x + 8][y + 8][z + 12] + u[t0][x + 8][y + 12][z + 8] + u[t0][x + 12][y + 8][z + 8]) + 2.53968248e-4F*(u[t0][x + 5][y + 8][z + 8] + u[t0][x + 8][y + 5][z + 8] + u[t0][x + 8][y + 8][z + 5] + u[t0][x + 8][y + 8][z + 11] + u[t0][x + 8][y + 11][z + 8] + u[t0][x + 11][y + 8][z + 8]) - 1.99999996e-3F*(u[t0][x + 6][y + 8][z + 8] + u[t0][x + 8][y + 6][z + 8] + u[t0][x + 8][y + 8][z + 6] + u[t0][x + 8][y + 8][z + 10] + u[t0][x + 8][y + 10][z + 8] + u[t0][x + 10][y + 8][z + 8]) + 1.59999996e-2F*(u[t0][x + 7][y + 8][z + 8] + u[t0][x + 8][y + 7][z + 8] + u[t0][x + 8][y + 8][z + 7] + u[t0][x + 8][y + 8][z + 9] + u[t0][x + 8][y + 9][z + 8] + u[t0][x + 9][y + 8][z + 8]) - 8.54166647e-2F*u[t0][x + 8][y + 8][z + 8]; u[t1][x + 8][y + 8][z + 8] = 1.0F*(r152*u[t2][x + 8][y + 8][z + 8] + 4.0F*r180*u[t0][x + 8][y + 8][z + 8] + 2.0F*r186*((8.33333346e-3F*r183 + 6.66666677e-2F*r184)*r49[x + 2][y + 2][z + 2] + (2*epsilon[x + 8][y + 8][z + 8] + 1)*(r153 + r181 + r182)))/r151; v[t1][x + 8][y + 8][z + 8] = 1.0F*(r152*v[t2][x + 8][y + 8][z + 8] + 4.0F*r180*v[t0][x + 8][y + 8][z + 8] + r186*(1.66666669138067e-2F*r183 + 1.33333335310454e-1F*r184 + 2.0F*(r153 + r181 + r182)*r49[x + 2][y + 2][z + 2]))/r151; } } } } } } }
texture.c
/*
 * Texture Manager
 *
 * Maintains the shared texture object store as a chained hash table keyed by
 * the GL texture handle (bucket = handle & TEXTURE_HASH_TABLE_MASK), and
 * implements the glTexImage*/glCopyTexImage2D upload paths.
 *
 * NOTE(review): error handling throughout uses the TinyGL pattern of an
 * unbraced `if (...)` immediately followed by `#include "error_check.h"` --
 * the included file supplies the statement body (set error flag, return
 * RETVAL).  It presumably #undef's RETVAL/ERROR_FLAG after use, since they
 * are re-#defined repeatedly below -- confirm against error_check.h.
 */
#include "zgl.h"

/* Look up a texture object by handle in the shared hash table.
 * Returns NULL when no object with that handle exists. */
static GLTexture* find_texture(GLint h) {
	GLTexture* t;
	GLContext* c = gl_get_context();

	t = c->shared_state.texture_hash_table[h & TEXTURE_HASH_TABLE_MASK];
	while (t != NULL) {
		if (t->handle == h)
			return t;
		t = t->next;
	}
	return NULL;
}

/* Report, per handle, whether the texture object currently exists.
 * Returns GL_TRUE only if every queried texture was found. */
GLboolean glAreTexturesResident(GLsizei n, const GLuint* textures, GLboolean* residences) {
#define RETVAL GL_FALSE
	GLboolean retval = GL_TRUE;
	GLint i;
#include "error_check_no_context.h"
	for (i = 0; i < n; i++)
		if (find_texture(textures[i])) {
			residences[i] = GL_TRUE;
		} else {
			residences[i] = GL_FALSE;
			retval = GL_FALSE;
		}
	return retval;
}

/* GL_TRUE iff a texture object with this handle exists in the hash table. */
GLboolean glIsTexture(GLuint texture) {
	GLContext* c = gl_get_context();
#define RETVAL GL_FALSE
#include "error_check.h"
	if (find_texture(texture))
		return GL_TRUE;
	return GL_FALSE;
}

/* TinyGL extension: expose the raw pixmap of one mipmap level of texture
 * `text`, writing its dimensions to *xsize/*ysize.  Returns NULL (or sets
 * GL_INVALID_ENUM when error checking is compiled in) if the handle is
 * invalid or out of range. */
void* glGetTexturePixmap(GLint text, GLint level, GLint* xsize, GLint* ysize) {
	GLTexture* tex;
	GLContext* c = gl_get_context();
#if TGL_FEATURE_ERROR_CHECK == 1
	if (!(text >= 0 && level < MAX_TEXTURE_LEVELS))
#define ERROR_FLAG GL_INVALID_ENUM
#define RETVAL NULL
#include "error_check.h"
#else
/*assert(text >= 0 && level < MAX_TEXTURE_LEVELS);*/
#endif
	tex = find_texture(text);
	if (!tex)
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_ENUM
#define RETVAL NULL
#include "error_check.h"
#else
		return NULL;
#endif
	*xsize = tex->images[level].xsize;
	*ysize = tex->images[level].ysize;
	return tex->images[level].pixmap;
}

/* Unlink the texture with handle `h` from its hash bucket and free it.
 * NOTE(review): assumes the handle exists -- find_texture's result is
 * dereferenced without a NULL check; callers must only pass live handles. */
static void free_texture(GLContext* c, GLint h) {
	GLTexture *t, **ht;
	t = find_texture(h);
	if (t->prev == NULL) {
		/* Head of the bucket chain: repoint the bucket itself. */
		ht = &c->shared_state.texture_hash_table[t->handle & TEXTURE_HASH_TABLE_MASK];
		*ht = t->next;
	} else {
		t->prev->next = t->next;
	}
	if (t->next != NULL)
		t->next->prev = t->prev;
	gl_free(t);
}

/* Allocate a zero-initialized texture object for handle `h` and push it on
 * the front of its hash bucket.  Returns the new object, or NULL/ fatal
 * error on allocation failure depending on TGL_FEATURE_ERROR_CHECK. */
GLTexture* alloc_texture(GLint h) {
	GLContext* c = gl_get_context();
	GLTexture *t, **ht;
#define RETVAL NULL
#include "error_check.h"
	t = gl_zalloc(sizeof(GLTexture));
	if (!t)
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#define RETVAL NULL
#include "error_check.h"
#else
		gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
	ht = &c->shared_state.texture_hash_table[h & TEXTURE_HASH_TABLE_MASK];
	t->next = *ht;
	t->prev = NULL;
	if (t->next != NULL)
		t->next->prev = t;
	*ht = t;
	t->handle = h;
	return t;
}

/* Per-context texture-state initialization: texturing off, and the default
 * texture object (handle 0) bound as current. */
void glInitTextures() {
	/* textures */
	GLContext* c = gl_get_context();
	c->texture_2d_enabled = 0;
	c->current_texture = find_texture(0);
}

/* Generate `n` fresh handles.  Scans every bucket for the largest handle in
 * use and hands out max+1 .. max+n.  O(total textures) per call. */
void glGenTextures(GLint n, GLuint* textures) {
	GLContext* c = gl_get_context();
	GLint max, i;
	GLTexture* t;
#include "error_check.h"
	max = 0;
	for (i = 0; i < TEXTURE_HASH_TABLE_SIZE; i++) {
		t = c->shared_state.texture_hash_table[i];
		while (t != NULL) {
			if (t->handle > max)
				max = t->handle;
			t = t->next;
		}
	}
	for (i = 0; i < n; i++) {
		textures[i] = max + i + 1; /* MARK: How texture handles are created.*/
	}
}

/* Delete the listed textures; nonexistent handles are ignored.  Deleting the
 * currently bound texture first rebinds the default texture 0. */
void glDeleteTextures(GLint n, const GLuint* textures) {
	GLint i;
	GLTexture* t;
	GLContext* c = gl_get_context();
#include "error_check.h"
	for (i = 0; i < n; i++) {
		t = find_texture(textures[i]);
		/* NOTE(review): `t != 0` is redundant with `t != NULL`. */
		if (t != NULL && t != 0) {
			if (t == c->current_texture) {
				glBindTexture(GL_TEXTURE_2D, 0);
#include "error_check.h"
			}
			free_texture(c, textures[i]);
		}
	}
}

/* Deferred op for glBindTexture: make `texture` current, creating the object
 * on first bind (GL's create-on-bind semantics). */
void glopBindTexture(GLParam* p) {
	GLint target = p[1].i;
	GLint texture = p[2].i;
	GLTexture* t;
	GLContext* c = gl_get_context();
#if TGL_FEATURE_ERROR_CHECK == 1
	/* NOTE(review): `target > 0` looks like it was meant to be a check on
	 * `texture` (e.g. texture >= 0) -- as written it is implied by the
	 * preceding equality test.  Confirm intent before changing. */
	if (!(target == GL_TEXTURE_2D && target > 0))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
#endif
	t = find_texture(texture);
	if (t == NULL) {
		t = alloc_texture(texture);
#include "error_check.h"
	}
	if (t == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
		gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
	}
	c->current_texture = t;
}

/* Queue a CopyTexImage2D op; actual work happens in glopCopyTexImage2D. */
void glCopyTexImage2D(GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border) {
	GLParam p[9];
#include "error_check_no_context.h"
	p[0].op = OP_CopyTexImage2D;
	p[1].i = target;
	p[2].i = level;
	p[3].i = internalformat;
	p[4].i = x;
	p[5].i = y;
	p[6].i = width;
	p[7].i = height;
	p[8].i = border;
	gl_add_op(p);
}

/* Copy a TGL_FEATURE_TEXTURE_DIM-square region of the front buffer into the
 * current texture's mip level.  Only the exact texture dimension is
 * supported (no scaling); anything else is GL_INVALID_OPERATION. */
void glopCopyTexImage2D(GLParam* p) {
	GLImage* im;
	PIXEL* data;
	GLint i, j;
	GLint target = p[1].i;
	GLint level = p[2].i;
	GLint x = p[4].i;
	GLint y = p[5].i;
	GLsizei w = p[6].i;
	GLsizei h = p[7].i;
	GLint border = p[8].i;
	GLContext* c = gl_get_context();
	/* NOTE(review): presumably converts from GL's lower-left origin to the
	 * framebuffer's top-left row order -- confirm against zbuffer layout. */
	y -= h;
	if (c->readbuffer != GL_FRONT || c->current_texture == NULL || target != GL_TEXTURE_2D || border != 0 || w != TGL_FEATURE_TEXTURE_DIM || /*TODO Implement image interp*/
		h != TGL_FEATURE_TEXTURE_DIM) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_OPERATION
#include "error_check.h"
#else
		return;
#endif
	}
	im = &c->current_texture->images[level];
	data = c->current_texture->images[level].pixmap;
	im->xsize = TGL_FEATURE_TEXTURE_DIM;
	im->ysize = TGL_FEATURE_TEXTURE_DIM;
	/* TODO implement the scaling and stuff that the GL spec says it should have.*/
	/* Source coordinates wrap modulo the framebuffer size in both axes. */
#if TGL_FEATURE_MULTITHREADED_COPY_TEXIMAGE_2D == 1
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (j = 0; j < h; j++)
		for (i = 0; i < w; i++) {
			data[i + j * w] = c->zb->pbuf[((i + x) % (c->zb->xsize)) + ((j + y) % (c->zb->ysize)) * (c->zb->xsize)];
		}
#else
	for (j = 0; j < h; j++)
		for (i = 0; i < w; i++) {
			data[i + j * w] = c->zb->pbuf[((i + x) % (c->zb->xsize)) + ((j + y) % (c->zb->ysize)) * (c->zb->xsize)];
		}
#endif
}

/* Deferred op for glTexImage1D.  Only level 0, GL_RGB/GL_UNSIGNED_BYTE,
 * 3 components, border 0 are accepted.
 * NOTE(review): height is forced to 1, so the resize branch below always
 * runs (1 != TGL_FEATURE_TEXTURE_DIM) and the 1-row source is stretched to a
 * square image -- confirm that is the intended 1D emulation.  The fatal
 * error message also says "glTexImage2D" in this 1D path. */
void glopTexImage1D(GLParam* p) {
	GLint target = p[1].i;
	GLint level = p[2].i;
	GLint components = p[3].i;
	GLint width = p[4].i;
	/* GLint height = p[5].i;*/
	GLint height = 1;
	GLint border = p[5].i;
	GLint format = p[6].i;
	GLint type = p[7].i;
	void* pixels = p[8].p;
	GLImage* im;
	GLubyte* pixels1;
	GLint do_free = 0;
	GLContext* c = gl_get_context();
	{
#if TGL_FEATURE_ERROR_CHECK == 1
		if (!(c->current_texture != NULL && target == GL_TEXTURE_1D && level == 0 && components == 3 && border == 0 && format == GL_RGB && type == GL_UNSIGNED_BYTE))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
		if (!(c->current_texture != NULL && target == GL_TEXTURE_1D && level == 0 && components == 3 && border == 0 && format == GL_RGB && type == GL_UNSIGNED_BYTE))
			gl_fatal_error("glTexImage2D: combination of parameters not handled!!");
#endif
	}
	if (width != TGL_FEATURE_TEXTURE_DIM || height != TGL_FEATURE_TEXTURE_DIM) {
		pixels1 = gl_malloc(TGL_FEATURE_TEXTURE_DIM * TGL_FEATURE_TEXTURE_DIM * 3); /* GUARDED*/
		if (pixels1 == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
			gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
		}
		/* no GLinterpolation is done here to respect the original image aliasing ! */
		gl_resizeImageNoInterpolate(pixels1, TGL_FEATURE_TEXTURE_DIM, TGL_FEATURE_TEXTURE_DIM, pixels, width, height);
		do_free = 1;
		width = TGL_FEATURE_TEXTURE_DIM;
		height = TGL_FEATURE_TEXTURE_DIM;
	} else {
		pixels1 = pixels;
	}
	im = &c->current_texture->images[level];
	im->xsize = width;
	im->ysize = height;
	/* Convert RGB source into the internal framebuffer pixel format. */
#if TGL_FEATURE_RENDER_BITS == 32
	gl_convertRGB_to_8A8R8G8B(im->pixmap, pixels1, width, height);
#elif TGL_FEATURE_RENDER_BITS == 16
	gl_convertRGB_to_5R6G5B(im->pixmap, pixels1, width, height);
#else
#error bad TGL_FEATURE_RENDER_BITS
#endif
	if (do_free)
		gl_free(pixels1);
}

/* Deferred op for glTexImage2D.  Only level 0, GL_RGB/GL_UNSIGNED_BYTE,
 * 3 components, border 0 are accepted; other combinations raise
 * GL_INVALID_ENUM (or are fatal without error checking).  Non-square /
 * wrong-size images are resampled to TGL_FEATURE_TEXTURE_DIM without
 * interpolation. */
void glopTexImage2D(GLParam* p) {
	GLint target = p[1].i;
	GLint level = p[2].i;
	GLint components = p[3].i;
	GLint width = p[4].i;
	GLint height = p[5].i;
	GLint border = p[6].i;
	GLint format = p[7].i;
	GLint type = p[8].i;
	void* pixels = p[9].p;
	GLImage* im;
	GLubyte* pixels1;
	GLint do_free = 0;
	GLContext* c = gl_get_context();
	{
#if TGL_FEATURE_ERROR_CHECK == 1
		if (!(c->current_texture != NULL && target == GL_TEXTURE_2D && level == 0 && components == 3 && border == 0 && format == GL_RGB && type == GL_UNSIGNED_BYTE))
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
		if (!(c->current_texture != NULL && target == GL_TEXTURE_2D && level == 0 && components == 3 && border == 0 && format == GL_RGB && type == GL_UNSIGNED_BYTE))
			gl_fatal_error("glTexImage2D: combination of parameters not handled!!");
#endif
	}
	if (width != TGL_FEATURE_TEXTURE_DIM || height != TGL_FEATURE_TEXTURE_DIM) {
		pixels1 = gl_malloc(TGL_FEATURE_TEXTURE_DIM * TGL_FEATURE_TEXTURE_DIM * 3); /* GUARDED*/
		if (pixels1 == NULL) {
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_OUT_OF_MEMORY
#include "error_check.h"
#else
			gl_fatal_error("GL_OUT_OF_MEMORY");
#endif
		}
		/* no GLinterpolation is done here to respect the original image aliasing ! */
		gl_resizeImageNoInterpolate(pixels1, TGL_FEATURE_TEXTURE_DIM, TGL_FEATURE_TEXTURE_DIM, pixels, width, height);
		do_free = 1;
		width = TGL_FEATURE_TEXTURE_DIM;
		height = TGL_FEATURE_TEXTURE_DIM;
	} else {
		pixels1 = pixels;
	}
	im = &c->current_texture->images[level];
	im->xsize = width;
	im->ysize = height;
	/* Convert RGB source into the internal framebuffer pixel format. */
#if TGL_FEATURE_RENDER_BITS == 32
	gl_convertRGB_to_8A8R8G8B(im->pixmap, pixels1, width, height);
#elif TGL_FEATURE_RENDER_BITS == 16
	gl_convertRGB_to_5R6G5B(im->pixmap, pixels1, width, height);
#else
#error Bad TGL_FEATURE_RENDER_BITS
#endif
	if (do_free)
		gl_free(pixels1);
}

/* TODO: not all tests are done */
/* Disabled stubs kept for reference: glopTexEnv / glopTexParameter /
 * glopPixelStore were never completed. */
/* void glopTexEnv(GLContext* c, GLParam* p) {
	GLint target = p[1].i;
	GLint pname = p[2].i;
	GLint param = p[3].i;
	if (target != GL_TEXTURE_ENV) {
	error:
#if TGL_FEATURE_ERROR_CHECK == 1
#define ERROR_FLAG GL_INVALID_ENUM
#include "error_check.h"
#else
		gl_fatal_error("glTexParameter: unsupported option");
#endif
	}
	if (pname != GL_TEXTURE_ENV_MODE)
		goto error;
	if (param != GL_DECAL)
		goto error;
}
*/
/* TODO: not all tests are done */
/*
void glopTexParameter(GLContext* c, GLParam* p) {
	GLint target = p[1].i;
	GLint pname = p[2].i;
	GLint param = p[3].i;
	if (target != GL_TEXTURE_2D && target != GL_TEXTURE_1D) {
	error:
		tgl_warning("glTexParameter: unsupported option");
		return;
	}
	switch (pname) {
	case GL_TEXTURE_WRAP_S:
	case GL_TEXTURE_WRAP_T:
		if (param != GL_REPEAT)
			goto error;
		break;
	}
}
*/
/*
void glopPixelStore(GLContext* c, GLParam* p) {
	GLint pname = p[1].i;
	GLint param = p[2].i;
	if (pname != GL_UNPACK_ALIGNMENT || param != 1) {
		gl_fatal_error("glPixelStore: unsupported option");
	}
}
*/
calc.h
#ifndef DEF_NARROWBAND_H
#define DEF_NARROWBAND_H

/* =========================================================================
   Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.

   -----------------
   ViennaTS - The Vienna Topography Simulator
   -----------------

   Contact:         viennats@iue.tuwien.ac.at

   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

//IF PARALLEL_MODE IS ACTIVE
#include "Cells.h"
#include "Time.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "Math.h"
#include <algorithm>
#include <vector>
#include <numeric>
#include "LSlib/vector.hpp"
#include "message.h"
#include "boundaries.h"
#include <cmath>
#include <cassert>
#include "LSlib/math.hpp"

///Namespace for calculation helpers.
namespace calc {

    /// Adapts a raw coordinate array of `Dimensions` entries to a 3-component
    /// accessor: indices beyond Dimensions read as 0 (pads 2D vectors to 3D).
    /// Does not own the pointed-to data; `v` must outlive this object.
    template <int Dimensions> class Make3DVector {
        const double* v;
    public:
        inline double operator[](int i) const {
            return (i<Dimensions)?v[i]:0.;
        }
        Make3DVector(const double* v2):v(v2) {}
    };

    /// Type/constant bundle consumed by the spatial partition (ray-tracing
    /// acceleration structure).  Sentinel values use the max of each type.
    template <class ParameterType> class PartitionTraits {
    public:
        typedef int IntCoordType;
        typedef unsigned int IntLinkType;
        //typedef const geom::cell<D>* CellRefType;
        typedef unsigned int CellRefType;

        static inline CellRefType UndefinedCellRef() {
            return std::numeric_limits<CellRefType>::max();
        }//return 0;}
        static inline IntLinkType UndefinedLink() {
            return std::numeric_limits<IntLinkType>::max();
        }

        //static const double SurfaceAreaHeuristicLambda=ParameterType::SurfaceAreaHeuristicLambda;
        //static const partition::SplittingModeType PartitionMode=ParameterType::PartitionMode;
        static const int Dimension=ParameterType::Dimension;
    };

    /// Collects every grid cell that can be hit by a particle: cells whose
    /// corners straddle the level-set zero crossing, plus cells intersected by
    /// the receptor disk (radius RecepterRadius) of an adjacent active point.
    /// Outputs the cell list (corner point ids) and the cell grid coordinates;
    /// both containers are rebuilt from scratch (Cells is cleared first).
    template<int D, class LS, class NormalVectorVectorClass, class DistancesVectorClass>
    void SetupCells(
            const LS& l,
            geom::cells<D> &Cells,
            std::vector<lvlset::vec<int,D> > &CellCoordinates,
            const NormalVectorVectorClass& NormalVectors,
            const DistancesVectorClass& DistancesToReceiver,
            double RecepterRadius
    ) {
        Cells.clear();
        //int cell_counter=0;
        //int cell_equal_signs_counter=0;
        //int cell_contains_disk_counter=0;
        //int cell_inserted_counter=0;

        for (typename LS::template const_iterator_cells_filtered<typename LS::filter_active> it(l);!it.is_finished();it.next()) {
            //cell_counter++;

            bool cell_contains_disk=false;

            // Sum the signs of all 2^D corner values: a mixed-sign cell is
            // crossed by the surface and always kept.
            int sgn_count=0;
            for (int i=0;i<(1<<D);i++) {
                sgn_count+=it.corner(i).sign();
            }

            if ((sgn_count!=(1<<D)) && (sgn_count!=0)) {
                cell_contains_disk=true;
                // //std::cout << "cell_cont_disk! ";
            } else {
                //cell_equal_signs_counter++;
                //check all corners
                for (int i=0;i<(1<<D);++i) {
                    //if corner is active
                    if (it.corner(i).is_active()) {
                        unsigned int id=it.corner(i).active_pt_id();
                        const double &d=DistancesToReceiver[id];

                        //check if disk of active grid point intersects corresponding cell
                        //check for all dimensions
                        bool cell_contains_disk2=true;
                        for (int dir=0;dir < D;++dir) {
                            const double &n=NormalVectors[id*D+dir];
                            // Project the disk center onto this axis relative
                            // to the corner, then widen by the disk's extent.
                            double min=n*d+((i>>dir) & 1);
                            double max=min;

                            double tmp=std::sqrt(std::max(0.,1-n*n))*RecepterRadius;
                            min-=tmp;
                            max+=tmp;

                            // Unit cell spans [0,1] on every axis; no overlap
                            // on any axis means no intersection.
                            if ((max<0.) || (min>1.) ) {
                                cell_contains_disk2=false;
                                break;
                            }
                        }
                        if (cell_contains_disk2) {
                            // //std::cout << "cell_cont_disk2! ";
                            cell_contains_disk=true;
                            //cell_contains_disk_counter++;
                            break;
                        }
                    }
                }
            }

            if (cell_contains_disk) {
                //cell_inserted_counter++;
                // NOTE(review): `points` is never used below -- dead local.
                lvlset::vec<unsigned int, (1<<D)> points;
                CellCoordinates.push_back(lvlset::vec<int,D>(it.indices()));
                Cells.push_back(geom::cell<D>());
                for (unsigned int i=0;i<(1<<D); i++ ) {
                    // if (it.corner(i).pt_id()==1028) //std::cout << "it.corner("<<i<<").pt_id():" <<it.corner(i).pt_id() <<std::endl;
                    Cells.back().Points[i]=it.corner(i).pt_id();
                    assert(it.corner(i).is_defined());
                }
            }
        }
        ////std::cout << "num_cells=" << cell_counter << std::endl;
        ////std::cout << "num_cells_equal_signs=" << cell_equal_signs_counter << std::endl;
        ////std::cout << "num_cells_contains_disk=" << cell_contains_disk_counter << std::endl;
        ////std::cout << "num_cells_inserted=" << cell_inserted_counter << std::endl;
    }

    /// Computes, for every active level-set point, a unit outward normal
    /// (central differences, with one-sided biasing where the sign of the
    /// slope flips and a default direction is given) and the signed distance
    /// from the point to its receptor-disk center along that normal.  The
    /// distance is clamped so the disk of radius ReceptorRadius stays within
    /// the adjacent voxels.  Degenerate (zero-gradient) points fall back to
    /// the default direction `t` with distance 0.
    /// NOTE(review): the `#pragma omp for` below is an orphaned worksharing
    /// construct -- it only parallelizes when this function is called from
    /// inside an enclosing `omp parallel` region; otherwise it runs
    /// sequentially.  Confirm against the callers.
    template <class LS>
    void CalculateNormalVectors(
            const LS& l,
            std::vector<double>& NormalVectors,
            std::vector<double>& DistancesToReceiver,
            int open_boundary_direction,
            bool is_open_boundary_negative,
            double ReceptorRadius,
            const lvlset::vec<double,LS::dimensions> & default_directions=lvlset::vec<double,LS::dimensions>(0)) {

        const int D=LS::dimensions;

        // Fallback direction: the normalized default, or the open-boundary
        // axis (signed) when no default was supplied.
        lvlset::vec<double,D> t=default_directions;
        double tmp=Norm(t);
        if (tmp==0) {
            t[open_boundary_direction]=(is_open_boundary_negative)?-1.:1.;
        } else {
            t/=tmp;
        }

        NormalVectors.clear();
        NormalVectors.resize(l.num_active_pts()*D);
        DistancesToReceiver.clear();
        DistancesToReceiver.resize(l.num_active_pts(),0.);

        //!Calculate Normalvectors
        typename LS::points_type segmentation=l.get_new_segmentation();

        #pragma omp for schedule(static, 1) // worksharing: iterations handed out in chunks of 1 (see NOTE above)
        for (int p=0;p<= static_cast<int>(segmentation.size());++p) {
            // Segment p covers [begin_v, end_v) of the level-set index space.
            typename LS::point_type begin_v=(p==0)?l.grid().min_point_index():segmentation[p-1];
            typename LS::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:l.grid().increment_indices(l.grid().max_point_index());

            for (typename LS::template const_iterator_neighbor_filtered<typename LS::filter_active, 1> it(l, typename LS::filter_active(), begin_v);it.indices()<end_v;it.next()) {

                double* n=&NormalVectors[it.center().active_pt_id2()*D];
                double& dist=DistancesToReceiver[it.center().active_pt_id2()];

                for (int i=0;i<D;i++) {
                    // Forward and backward one-sided differences.
                    double pos=it.neighbor(i).value()-it.center().value();
                    double neg=it.center().value()-it.neighbor(i+D).value();
                    n[i]=0;
                    if ((pos > 0 && neg < 0) || (pos < 0 && neg > 0)) {
                        // Non-monotone profile: bias toward the preferred
                        // default direction, otherwise average.
                        if (default_directions[i]<0) {
                            n[i]=std::min(neg,pos);
                        } else if (default_directions[i]>0) {
                            n[i]=std::max(neg,pos);
                        } else {
                            n[i]=(pos+neg)*0.5;
                        }
                    } else {
                        n[i]=(pos+neg)*0.5;
                    }
                }
                //for (int i=0;i<D;i++) n[i]=it.gradient2(i);

                // Normalize via the max component to avoid overflow/underflow.
                double tmp_max=std::fabs(n[0]);
                for (int uu=1;uu<D;uu++) tmp_max=std::max(tmp_max,std::fabs(n[uu]));

                if (tmp_max==0.)
                {
                    for (int uu=0;uu<D;uu++) n[uu]=t[uu];
                    dist=0.;
                } else {
                    double no2=0.;
                    for (int uu=0;uu<D;uu++) no2+=my::math::pow2(n[uu]/tmp_max);
                    double no=tmp_max*std::sqrt(no2);
                    for (int uu=0;uu<D;uu++) n[uu]/=no;
                    dist=-it.center().value()/no;
                }

                //reduce distance if receptor disk would not completely lie inside of attached voxels
                for (int i=0;i<D;i++) {
                    dist=my::math::Sign(dist)*std::max(
                            0.,
                            std::min(
                                std::fabs(dist),
                                (1.-ReceptorRadius*std::sqrt( std::max(0.,1-n[i]*n[i]) ))/std::fabs(n[i]))
                            );
                    assert(!std::isnan(dist));
                }

                //for (int i=0;i<D;++i) NormalVectors.push_back(n[i]);
                //DistancesToReceiver.push_back(dist);
            }
        }
    }

    /// Computes the mean curvature at every active level-set point from
    /// first/second central differences over the 18 face+edge neighbors.
    /// Zero-gradient points get +/-max<double> depending on the sign of the
    /// numerator.  `initialized` controls whether the neighbor-iterator cache
    /// is rebuilt (pass-by-value, so the caller's flag is unchanged).
    /// NOTE(review): the neighbor offsets are generated with vec(i,j,k) and
    /// named X*Y*Z* -- this routine assumes D==3.  Also an orphaned
    /// `#pragma omp for` (see CalculateNormalVectors note); with it_neighbors
    /// declared outside the pragma it would be shared across threads inside a
    /// parallel region -- confirm intended usage.
    template <class LS>
    void CalculateCurvatureVectors(const LS& l, std::vector<double>& CurvatureVectors, bool initialized) {
        ////std::cout << "here!\n";
        const int D=LS::dimensions;
        typedef typename LS::index_type index_type;

        CurvatureVectors.clear();
        CurvatureVectors.resize(l.num_active_pts());

        typename LS::points_type segmentation=l.get_new_segmentation();

        std::vector<typename LS::const_iterator_runs_offset> it_neighbors;

        #pragma omp for schedule(static, 1) // worksharing: iterations handed out in chunks of 1 (see NOTE above)
        for (int p=0;p<= static_cast<int>(segmentation.size());++p) {
            typename LS::point_type begin_v=(p==0)?l.grid().min_point_index():segmentation[p-1];
            typename LS::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:l.grid().increment_indices(l.grid().max_point_index());

            for (typename LS::template const_iterator_neighbor_filtered<typename LS::filter_active, 1> it(l, typename LS::filter_active(), begin_v);it.indices()<end_v;it.next()) {

                double* curv=&CurvatureVectors[it.center().active_pt_id2()];

                if (initialized) {
                    // Reuse cached offset iterators; advance them in lockstep.
                    for (unsigned int i=0;i<it_neighbors.size();i++) it_neighbors[i].go_to_indices_sequential(it.indices());
                } else {
                    // Build one offset iterator per face/edge neighbor
                    // (at least one zero AND one nonzero component).
                    for (int i=-1;i<=1;++i) {
                        for (int j=-1;j<=1;++j) {
                            for (int k=-1;k<=1;++k) {
                                if (((i!=0) || (j!=0) || (k!=0)) && ((i==0) || (j==0) || (k==0))) {
                                    lvlset::vec<index_type,D> v(i,j,k);
                                    ////std::cout << "v(" << v[0] << ", " << v[1] << ", " << v[2] << ")\n";
                                    it_neighbors.push_back(typename LS::const_iterator_runs_offset(l, v,it.indices()));
                                }
                            }
                        }
                    }
                    initialized=true;
                }

                // Indices into it_neighbors, in the push_back order above
                // (m = -1, 0 = 0, p = +1 on each axis).
                const int XmYmZ0=0;
                const int XmY0Zm=1;
                const int XmY0Z0=2;
                const int XmY0Zp=3;
                const int XmYpZ0=4;
                const int X0YmZm=5;
                const int X0YmZ0=6;
                const int X0YmZp=7;
                const int X0Y0Zm=8;
                const int X0Y0Zp=9;
                const int X0YpZm=10;
                const int X0YpZ0=11;
                const int X0YpZp=12;
                const int XpYmZ0=13;
                const int XpY0Zm=14;
                const int XpY0Z0=15;
                const int XpY0Zp=16;
                const int XpYpZ0=17;

                // Central first derivatives.
                double PhiX=(it_neighbors[XpY0Z0].value()-it_neighbors[XmY0Z0].value())*0.5;
                double PhiY=(it_neighbors[X0YpZ0].value()-it_neighbors[X0YmZ0].value())*0.5;
                double PhiZ=(it_neighbors[X0Y0Zp].value()-it_neighbors[X0Y0Zm].value())*0.5;

                // Central second derivatives and mixed terms.
                double PhiXX=it_neighbors[XpY0Z0].value()+it_neighbors[XmY0Z0].value()-2*it.center().value();
                double PhiYY=it_neighbors[X0YpZ0].value()+it_neighbors[X0YmZ0].value()-2*it.center().value();
                double PhiZZ=it_neighbors[X0Y0Zp].value()+it_neighbors[X0Y0Zm].value()-2*it.center().value();

                double PhiXY=(it_neighbors[XpYpZ0].value()+it_neighbors[XmYmZ0].value()-it_neighbors[XpYmZ0].value()-it_neighbors[XmYpZ0].value())*0.25;
                double PhiXZ=(it_neighbors[XpY0Zp].value()+it_neighbors[XmY0Zm].value()-it_neighbors[XpY0Zm].value()-it_neighbors[XmY0Zp].value())*0.25;
                double PhiYZ=(it_neighbors[X0YpZp].value()+it_neighbors[X0YmZm].value()-it_neighbors[X0YmZp].value()-it_neighbors[X0YpZm].value())*0.25;

                //const int mode=0;

                double denom=PhiX*PhiX+PhiY*PhiY+PhiZ*PhiZ;
                double num=     0.5*PhiX*PhiX*(PhiYY+PhiZZ)-PhiY*PhiZ*PhiYZ+     //mean curvature
                                0.5*PhiY*PhiY*(PhiXX+PhiZZ)-PhiX*PhiZ*PhiXZ+
                                0.5*PhiZ*PhiZ*(PhiXX+PhiYY)-PhiX*PhiY*PhiXY;

                // double s=0.;
                //if (material<material_level) {
                if (denom!=0) {
                    *curv=num/(denom*std::sqrt(denom));
                    ////std::cout << "*curv: " << *curv << std::endl;
                    // if ((k>max_curvature) || (k<min_curvature)) s= -num/denom;
                } else {
                    //std::cout << "warning!!!dlkajf" << std::endl;
                    if (num>0) {
                        *curv=-std::numeric_limits<double>::max();
                    } else {
                        *curv=std::numeric_limits<double>::max();
                    }
                }
                //}
                // return s;
                // //std::cout << "curv: " << *curv << "\n";
            }
        }
        //!Calculate Curvature vectors
    }

    namespace {
        /// A particle cluster's position: local coordinates X within the
        /// partition subbox that currently contains it.
        template<int D, class PartitionType> class ClusterPositionType {
        public:
            double X[D];
            typename PartitionType::subbox Subbox;
        };
    }

    // [Josef] Main function where particles are tracked and collision with interface is checked!
template <class ModelType, class ParameterType, class PartitionType, class LevelSetType> void CalculateRates( const ModelType &Model, const ParameterType &Parameter, const PartitionType &Partition, const LevelSetType &SurfaceLevelSet, const std::vector<double>& NormalVectors, const std::vector<double>& DistancesToReceiver, const std::vector<double>& Coverages, std::vector<double>& Rates, const std::vector<unsigned int>& PointMaterials, const geom::cells<ParameterType::Dimension>& Cells, double ProcessTime) { // std::cout << "1\n"; const int D=ParameterType::Dimension; typedef ClusterPositionType<D, PartitionType> ClusterPositionType; const double ReceptorRadius=Parameter.receptor_radius; const double ReceptorRadius2=ReceptorRadius*ReceptorRadius; const double further_tracking_distance=Parameter.further_tracking_distance; //default is 3 //Initialize Rates unsigned int num_active_points=SurfaceLevelSet.num_active_pts(); Rates.clear(); if(NormalVectors.size()!=num_active_points*D){ std::cout << "Assert normal vector size: " << NormalVectors.size() << " = " << num_active_points*D << "\n"; assert(0); } if(Coverages.size()<num_active_points*Model.CoverageStorageSize){ std::cout << "Assert Coverage size: " << Coverages.size() << " >= " << num_active_points*Model.CoverageStorageSize << "\n"; assert(0); } #ifdef _OPENMP const int max_threads=omp_get_max_threads(); #else const int max_threads=1; #endif std::vector<std::vector<double > > all_tmp_Rates( max_threads, std::vector<double>(num_active_points*Model.RatesStorageSize,0.) 
); double RecepterArea=(D==3)?(Parameter.receptor_radius*Parameter.receptor_radius*my::math::Pi):(2.*Parameter.receptor_radius); if (!ModelType::SpatiallyEqualDistributedFlux) { RecepterArea*=Parameter.grid_delta; if (D==3) RecepterArea*=Parameter.grid_delta; } ////std::cout << "Recepter Area: " << RecepterArea << endl; #pragma omp parallel { //determine the number of different starting locations //in case of equally distributed flux the number of starting places is equal to the open surface area measured in grid spacings //in case of non-equally distributed flux the number of starting places is set to the number of threads #ifdef _OPENMP const int my_num_threads=omp_get_num_threads(); const int my_thread_num=omp_get_thread_num(); #else const int my_num_threads=1; const int my_thread_num=0; #endif const int NumStartingPlaces=(ModelType::SpatiallyEqualDistributedFlux)? static_cast<int>(Partition.AreaSize(Parameter.open_boundary)): //AXIS my_num_threads; //for each thread a vector is defined, where the rates are stored std::vector<double>& tmp_Rates=all_tmp_Rates[my_thread_num]; assert(tmp_Rates.size()==num_active_points*Model.RatesStorageSize); //std::cout << "assert temp rates size \n"; //stacks to store the particles and their positions std::stack<typename ModelType::ParticleType> ParticleStack; std::stack<ClusterPositionType> ParticlePositionsStack; //beginning of parallel section with dynamic scheduling #pragma omp for schedule(dynamic) //Chunks are dynamically assigned to threads on a first-come, first-serve basis as threads become available. 
for (int StartingPlace=0;StartingPlace<NumStartingPlaces;++StartingPlace) { //for each starting place do //used to store the partition subbox the particles starts from typename PartitionType::subbox starting_subbox; //the start position of the particle (in global coordinates) double StartPosition[3]; //if spatially equal distributed flux, determine the start_box if (ModelType::SpatiallyEqualDistributedFlux) { unsigned int tmp_s=StartingPlace; int tmp_dim=Parameter.open_boundary; //AXIS if (tmp_dim==0) tmp_dim=D; --tmp_dim; for (int i=0;i<D-2;++i) { StartPosition[tmp_dim]=tmp_s%Partition.Extension(tmp_dim); tmp_s/=Partition.Extension(tmp_dim); if (tmp_dim==0) tmp_dim=D; --tmp_dim; } StartPosition[tmp_dim]=tmp_s; starting_subbox=Partition.Access(StartPosition, Parameter.open_boundary, Parameter.open_boundary_negative); //std::cout << "equaldistributed \n"; } //for each involved particle type do for (unsigned int ParticleType=0;ParticleType<Model.NumberOfParticleTypes;++ParticleType) { //if (ParticleType==1) //std::cout << "AH!!!\n"; //determine the number of particles which have to be simulated const unsigned int NumOfParticles=(ModelType::SpatiallyEqualDistributedFlux)? Model.NumberOfParticleClusters[ParticleType]: Model.NumberOfParticleClusters[ParticleType]/my_num_threads +(my_thread_num < static_cast<int>(Model.NumberOfParticleClusters[ParticleType]%my_num_threads)?1:0); //for each particle do for (unsigned int ParticleCounter=0;ParticleCounter<NumOfParticles;++ParticleCounter) { //std::cout << "\nparticles\n"; //generate cluster energy and direction typename ModelType::ParticleType p; //typename ModelType::TipHeightType dist; Model.ParticleGeneration(p,ParticleType,ProcessTime, StartPosition); //std::cout << "\nparticlegeneration\n"; //if particle is not moving downwards if (Parameter.open_boundary_negative) { if(p.Direction[Parameter.open_boundary]<=0.) continue; } else { if(p.Direction[Parameter.open_boundary]>=0.) 
continue; } //calculate represented flux by that particle p.Flux/=Model.NumberOfParticleClusters[ParticleType]; // //std::cout<<"p.Flux1="<<p.Flux<<"\n"; p.Flux/=RecepterArea; // //std::cout<<"p.Flux2="<<p.Flux<<"\n"; //determine starting position and starting subbox ClusterPositionType cp; if (ModelType::SpatiallyEqualDistributedFlux) { //if flux is equal distributed //chose random start position for (int i=0;i<D;++i) { cp.X[i]=StartPosition[i]; if (i!=Parameter.open_boundary) cp.X[i]+=my::stat::RandomNumber(); } cp.Subbox=starting_subbox; //determine additional particles, which are necessary to account for extended boundaries int zmax[D-1]; int dir=Parameter.open_boundary; for (int i=0;i<D-1;++i) { dir=(Parameter.open_boundary+i+1)%D; zmax[i]=0; if (dir!=Parameter.open_boundary) { if ((Parameter.boundary_conditions[dir].min==bnc::EXTENDED_BOUNDARY) && (p.Direction[dir]>0)) { zmax[i]=static_cast<int>( std::ceil( ( -std::min( std::fabs((Partition.Extension(Parameter.open_boundary)*p.Direction[dir])/p.Direction[Parameter.open_boundary]), static_cast<double>(Parameter.max_extended_starting_position) ) -(cp.X[dir]+(cp.Subbox.Min(dir)-Partition.Min(dir))) )/Partition.Extension(dir) ) ); assert(zmax[i]<=0); } else if ((Parameter.boundary_conditions[dir].max==bnc::EXTENDED_BOUNDARY) && (p.Direction[dir]<0)) { zmax[i]=static_cast<int>( std::floor( ( std::min( std::fabs((Partition.Extension(Parameter.open_boundary)*p.Direction[dir])/p.Direction[Parameter.open_boundary]), static_cast<double>(Parameter.max_extended_starting_position) ) -(cp.X[dir]+(cp.Subbox.Min(dir)-Partition.Min(dir))) )/Partition.Extension(dir) ) )+1; assert(zmax[i]>=0); } } } int counter[D-1]; for (int k=0;k<D-1;++k) counter[k]=0; //add additional particles to the stack while (true) { int h=0; for (;h<D-1;++h) { if (counter[h]!=zmax[h]) { if (zmax[h]>0) ++counter[h]; else --counter[h]; break; } else { counter[h]=0; } } if (h==D-1) break; ClusterPositionType new_cp; for (int g=0;g<D-1;++g) { int 
dir=(g+Parameter.open_boundary+1)%D; new_cp.X[dir]= cp.X[dir]+ static_cast<double>(cp.Subbox.Min(dir)-Partition.Min(dir))+ //TODO check!!! static_cast<double>(counter[g])*static_cast<double>(Partition.Extension(dir)); } new_cp.Subbox=Partition.Access(new_cp.X, Parameter.open_boundary, Parameter.open_boundary_negative); ParticlePositionsStack.push(new_cp); ParticleStack.push(p); } //std::cout << "again EDF\n"; } else { for (int i=0;i<D;++i) cp.X[i]=StartPosition[i]/Parameter.grid_delta; //scale starting position double t=-( cp.X[Parameter.open_boundary]- ((Parameter.open_boundary_negative)?Partition.Min(Parameter.open_boundary):Partition.Max(Parameter.open_boundary)) )/p.Direction[Parameter.open_boundary]; //Move cp.X to the top LS surface and update horizontal axis values (not open boundary value) for (int dir=0;dir<D;++dir) { if (dir!=Parameter.open_boundary) { bool ReverseSign; cp.X[dir]=Parameter.boundary_conditions[dir].map_coordinate(Partition.Min(dir), Partition.Max(dir),cp.X[dir]+p.Direction[dir]*t, ReverseSign); if (ReverseSign) p.Direction[dir]=-p.Direction[dir]; } } cp.Subbox=Partition.Access(cp.X, Parameter.open_boundary, Parameter.open_boundary_negative); //cp.X is now position within subbox after removing the "global components" } //loop until particle stack is empty while (true) { //initialize the travelled distance from the intersection with -oo double travelled_distance_from_intersection(-std::numeric_limits<double>::max()); //std::cout << "DTFI\n"; //the indices of the surface grid cell which was previously visited int last_surface_cell_indices[D]; for (int r=0;r<D;++r) last_surface_cell_indices[r]=Partition.Min(r)-2; //initialize with invalid indices //Iterate through the cells between LS.Max() and LS.Min() until surface reached or particle exits environment while (true) { //std::cout << "particleIteration\n"; //get reference to actual cluster const typename PartitionType::subbox &Subbox= cp.Subbox; 
//####################################################### //# find max distance within box # //####################################################### double max_distance_in_box=std::numeric_limits<double>::max(); int LeavingDirection=-1; //LeavingDirection : 0,1,2 particle leaves box in x,y,z direction respectively std::bitset<D> PositionStatusPos; //for each direction the bit is set if a particle is outside (in positive direction) of the regular simulation domain std::bitset<D> PositionStatusNeg; //for each direction the bit is set if a particle is outside (in negative direction) of the regular simulation domain //for each dimension do int i; for (i=0;i<D;i++) { //std::cout << "Dimension\n"; double t_temp=std::numeric_limits<double>::max(); //Subbox.Extension(dir) is the length of the subbox in the dir direction //Subbox.Min(dir) is the global coordinate (grid points) of the Subbox edge in the min dir direction //Subbox.Max(dir) is the global coordinate (grid points) of the Subbox edge in the max dir direction //Global coordinate is then found by Subbox.Min(dir)+cp.X[dir] //When outside the min extended boundary if ((cp.X[i]<=0) && (Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY)) { if (p.Direction[i]>0.) { if (cp.X[i]==0) { t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i]; } else { t_temp=-cp.X[i]/p.Direction[i]; PositionStatusNeg.set(i); } } else { if (cp.X[i]<0/*-Parameter.DomainExtension*/) break; PositionStatusNeg.set(i); } //When outside the max extended boundary } else if ((cp.X[i]>=Subbox.Extension(i)) && (Parameter.boundary_conditions[i].max==bnc::EXTENDED_BOUNDARY)) { if (p.Direction[i]<0.) { if (cp.X[i]==Subbox.Extension(i)) { t_temp=-cp.X[i]/p.Direction[i]; } else { t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i]; PositionStatusPos.set(i); } } else { if (cp.X[i]>Subbox.Extension(i)/*+Parameter.DomainExtension*/) break; PositionStatusPos.set(i); } } else { if (p.Direction[i]>0.) 
{ //t_temp is the variable to determine time to reach Subbox.Extension(i) from cp.X in p.Direction[i] t_temp=(Subbox.Extension(i)-cp.X[i])/p.Direction[i]; } else if (p.Direction[i]<0.) { t_temp=-cp.X[i]/p.Direction[i]; } } //Determine which axis is the leaving direction of the particle if (t_temp<max_distance_in_box) { max_distance_in_box=t_temp; LeavingDirection=i; } } //cp.X remains unchanged at this point, only max_distance_in_box and LeavingDirection are found // //std::cout << "cp.X4 " << "(" << cp.X[0] << "," << cp.X[1] << "," << cp.X[2] << ")\n"; // //std::cout << "max_dinstance_in_box: " << max_distance_in_box << "\n"; // //std::cout << "LeavingDirection: " << LeavingDirection << "\n"; //Now have max_distance_in_box = distance from cp.X to point of exit in the i direction //where i is the leaving direction (x,y,z)=(0,1,2) if ((i!=D) || (LeavingDirection==-1)) break; //When the subbox which received the particle also contains within it the surface boundary if (Subbox.ContainsCell()) { // std::cout << "ContainsCell()\n"; //if subbox is a surface grid cell const geom::cell<D> &Cell=Cells[Subbox.Cell()]; //std::cout << "containsCell\n"; //Calculate the exit direction and distance as before to see if surface is intersected //####################################################### //# check for surface intersection # //####################################################### //[Josef] This is where the particle is tracked within a subbox for surface intersection //Check if Surface is intersected between position and position+max_distance_in_box*direction if (travelled_distance_from_intersection==-std::numeric_limits<double>::max()) { //get distances at corners double Rho[1<<D]; int sgn_count=0; for (int i=0;i<(1<<D);i++) { Rho[i]=SurfaceLevelSet.value2(Cell.Points[((std::bitset<D>(i) | PositionStatusPos) & (~PositionStatusNeg)).to_ulong()]); //std::cout << "Rho[" << i << "] = " << Rho[i] << "\n"; if (Rho[i]>0) sgn_count++; } // std::cout << "HERE!\n"; if 
(sgn_count!=(1<<D)) { my::math::TransformRho2<D>::exec(Rho); double relative_distance_to_intersection; if (sgn_count==0) { relative_distance_to_intersection=0.; } else { my::math::Polynom<double, D> poly; my::math::DetermineCoefficientsForImplicitRayTracing<D>( cp.X, p.Direction, Rho, &(poly.Coefficients()[0]) ); // std::cout << "cp.X5: (" << cp.X[0] << "," << cp.X[1] << "," << cp.X[2] << ")\n"; relative_distance_to_intersection=my::math::FindFirstTransitionFromPosToNegOfPolynomNewton(0., max_distance_in_box, poly,1e-6); } if (relative_distance_to_intersection < std::numeric_limits<double>::max()) { //if particle hits surface //std::cout << "hits\n"; travelled_distance_from_intersection=-relative_distance_to_intersection; ClusterPositionType new_cp=cp; for (int kk=0;kk<D;kk++) new_cp.X[kk]+=relative_distance_to_intersection*p.Direction[kk]; // std::cout << "RDI\n"; //p.Direction[kk] is unchanged at this point //new_cp.X contains the coordinates within the Subbox at the subbox exit point //determine normal vector double tmp_normalvec[3]; my::math::CalculateNormal<D>(tmp_normalvec,new_cp.X,Rho); if (D==2) tmp_normalvec[2]=0.; // std::cout << "tmp_normalvec(): " << tmp_normalvec[0] << ", " << tmp_normalvec[1] << ", " << tmp_normalvec[2] << "\n"; double dot=tmp_normalvec[0]*p.Direction[0]; for (int w=1;w<D;++w) dot+=tmp_normalvec[w]*p.Direction[w]; if (dot>=0.) { msg::print_warning("Particle hits negative side of surface! 
Particle is skipped."); break; } //calculate nearest active grid point to determine coverages and material (using Manhattan distance for speedup) unsigned int gp=0; unsigned int mat=0; if ((ModelType::CoverageStorageSize>0) || (ModelType::ReemissionIsMaterialDependent)) { // std::cout << "CSS>0 RMD\n"; double dist=std::numeric_limits<double>::max(); // std::cout << "D = " << D << "\n"; for (int g=0;g<(1<<D);g++) { // std::cout << "g = " << g << "\n"; unsigned int tmp_gp= SurfaceLevelSet.active_pt_id(Cell.Points[g]); // std::cout << "tmp_gp = " << tmp_gp << "\n"; // std::cout << "new_cp = " << new_cp.X[0] << ", " << new_cp.X[1] << ", " << new_cp.X[2] << "\n"; // std::cout << "LevelSetType::INACTIVE = " << LevelSetType::INACTIVE << "\n"; if (tmp_gp!=LevelSetType::INACTIVE) { // std::cout << "4\n"; double tmp_dist=0; // std::cout << "5\n"; for (int iii=0;iii<D;iii++) tmp_dist+=(((g & (1<<iii))==0)?(new_cp.X[iii]):(1.-new_cp.X[iii])); // std::cout << "6\n"; // std::cout << "dist = " << dist << "\n"; // std::cout << "tmp_dist = " << tmp_dist << "\n"; if (tmp_dist<dist) { // std::cout << "7\n"; dist=tmp_dist; gp=tmp_gp; // std::cout << "gp = " << gp << "\n"; } } } // std::cout << "PointMaterials["<<gp<<"] = "<<PointMaterials[gp]<<"\n"; mat=PointMaterials[gp]; } //perform particle reemission // std::cout << "Reflection\n"; Model.ParticleReflexion( p, ParticleStack, tmp_normalvec, &Coverages[gp*Model.CoverageStorageSize], mat//, D, dot ); while (ParticleStack.size()>ParticlePositionsStack.size()) ParticlePositionsStack.push(new_cp); } } } //std::cout << "positionStatusPos\n"; if (PositionStatusPos.none() && PositionStatusNeg.none()) { //##################################################################### //# determine corners which have to be checked for disk intersections # //##################################################################### std::bitset<(1<<D)> corners; for (int dir=0;dir<D;++dir) { switch(Subbox.Min(dir)-last_surface_cell_indices[dir]) { case 0: 
break; case 1: corners>>= (1<<dir); corners|= ((dir<2)?((dir<1)?0xAA:0xCC):0xF0); break; case -1: corners<<= (1<<dir); corners|= ((dir<2)?((dir<1)?0x55:0x33):0x0F); break; default: corners.set(); } } for (int s=0;s<D;++s) last_surface_cell_indices[s]=Subbox.Min(s); //####################################################### //# check for disk intersections # //####################################################### //[Josef] This is where the four corners of the box containing the particle are checked for intersection //all 8 neighbors have to be checked if they are active and their disks are hit for (int g=0;g<(1<<D);g++) { if(corners.test(g)) { unsigned int gp= SurfaceLevelSet.active_pt_id(Cell.Points[g]); if (gp!=LevelSetType::INACTIVE) { unsigned int gpD=gp*D; double cos=-NormalVectors[gpD]*p.Direction[0]; for (int kk=1;kk<D;kk++) cos-=NormalVectors[gpD+kk]*p.Direction[kk]; if (cos > 0.) { //calculate relative position to disk midpoint double rel_pos[D]; for (int kk=0;kk<D;kk++) rel_pos[kk]=cp.X[kk]-((g>>kk) & 1)-NormalVectors[gpD+kk]*DistancesToReceiver[gp]; //rel_pos holds cp.X not cp_new.X //calculate rel_pos*disk_normal double rel_pos_dot_normal=rel_pos[0]*NormalVectors[gpD]; for (int kk=1;kk<D;kk++) rel_pos_dot_normal+=rel_pos[kk]*NormalVectors[gpD+kk]; if ( rel_pos_dot_normal <= (further_tracking_distance-travelled_distance_from_intersection)*cos ) { double tmpx=my::math::pow2(rel_pos[0]*cos+p.Direction[0]*rel_pos_dot_normal); for (int kk=1;kk<D;kk++) tmpx+=my::math::pow2(rel_pos[kk]*cos+p.Direction[kk]*rel_pos_dot_normal); if (tmpx<=cos*cos*ReceptorRadius2) { int Factor=1; for (int kk=0;kk<D;kk++) { if (kk!=Parameter.open_boundary) { if ((Parameter.boundary_conditions[kk].min==bnc::REFLECTIVE_BOUNDARY) || (Parameter.boundary_conditions[kk].min==bnc::EXTENDED_BOUNDARY)) { if ((g & (1<<kk))==0) { if (Partition.Min(kk)==Subbox.Min(kk)) { if (cp.X[kk]*cos+p.Direction[kk]*rel_pos_dot_normal>=0.) 
{ Factor<<=1; } else { Factor=0; break; } } } else { if (Partition.Max(kk)==Subbox.Max(kk)) { if (cp.X[kk]*cos+p.Direction[kk]*rel_pos_dot_normal<=cos) { Factor<<=1; } else { Factor=0; break; } } } } } } for (;Factor>0;--Factor) { //[Josef] Here, the particle has collided with the surface so the model's function to deal with this is called. // int mat = 0; // if ((ModelType::CoverageStorageSize>0) || (ModelType::ReemissionIsMaterialDependent)) // mat = PointMaterials[gp]; //std::cout << "Collision\n"; Model.ParticleCollision( p, Make3DVector<D>(&NormalVectors[gpD]), //&NormalVectors[gpD], &(tmp_Rates[gp*Model.RatesStorageSize]), &(Coverages[gp*Model.CoverageStorageSize]), ProcessTime//, // mat ); } } } } } } } } } //Dealing with particles beyond simulation boundaries and boundary conditions: //###################################################################### //# check if calculation of particle cluster trajectory can be stopped # //###################################################################### if (travelled_distance_from_intersection!=-std::numeric_limits<double>::max()) { travelled_distance_from_intersection+=max_distance_in_box; if (travelled_distance_from_intersection>=further_tracking_distance) break; } //####################################################### //# calculate exit point # //####################################################### for (int kk=0;kk<D;kk++) { if (kk!=LeavingDirection) cp.X[kk]+=p.Direction[kk]*max_distance_in_box; } if (PositionStatusNeg.test(LeavingDirection)) { //particle enters regular simulation domain from negative side cp.X[LeavingDirection]=0; } else if (PositionStatusPos.test(LeavingDirection)) { //particle enters regular simulation domain from positive side cp.X[LeavingDirection]=cp.Subbox.Extension(LeavingDirection); } else { //####################################################### //# get next box # //####################################################### int old_min=cp.Subbox.Min(LeavingDirection); bool 
IsDirectionPositive=(p.Direction[LeavingDirection]>=0.); if (Partition.GoToNeighborBox(cp.Subbox,cp.X,LeavingDirection, IsDirectionPositive)) { if (IsDirectionPositive) { if (Parameter.boundary_conditions[LeavingDirection].max==bnc::INFINITE_BOUNDARY) break; if (Parameter.boundary_conditions[LeavingDirection].max==bnc::REFLECTIVE_BOUNDARY) p.Direction[LeavingDirection]=-p.Direction[LeavingDirection]; cp.X[LeavingDirection]=cp.Subbox.Extension(LeavingDirection); } else { if (Parameter.boundary_conditions[LeavingDirection].min==bnc::INFINITE_BOUNDARY) break; if (Parameter.boundary_conditions[LeavingDirection].min==bnc::REFLECTIVE_BOUNDARY) p.Direction[LeavingDirection]=-p.Direction[LeavingDirection]; cp.X[LeavingDirection]=0; } last_surface_cell_indices[LeavingDirection]=Partition.Min(LeavingDirection)-2; } else { cp.X[LeavingDirection]=(IsDirectionPositive)?0:cp.Subbox.Extension(LeavingDirection); if ((Parameter.boundary_conditions[LeavingDirection].min==bnc::PERIODIC_BOUNDARY) && PositionStatusPos.none() && PositionStatusNeg.none()) { if (IsDirectionPositive) { if (old_min>=cp.Subbox.Min(LeavingDirection)) last_surface_cell_indices[LeavingDirection]-=Partition.Extension(LeavingDirection); } else { if (old_min<=cp.Subbox.Min(LeavingDirection)) last_surface_cell_indices[LeavingDirection]+=Partition.Extension(LeavingDirection); } } } } } // std::cout << ParticleStack.size() << std::endl; if (ParticleStack.empty()) break; //####################################################### //# retrieve particle from stack # //####################################################### p=ParticleStack.top(); ParticleStack.pop(); cp=ParticlePositionsStack.top(); ParticlePositionsStack.pop(); } // end while loop: until particle stack is empty }//end of particle loop }//end of particle type loop } #pragma omp single //run by a single available thread. 
{ Rates.swap(all_tmp_Rates[0]); } #pragma omp for for (int i=0;i<static_cast<int>(Rates.size());i++) { for (int j=1;j<my_num_threads;j++) { Rates[i]+=all_tmp_Rates[j][i]; } } // [josef] now that all thead-exclusive thread rates have been merged, we can output them if (Model.OutputFluxes) { { std::ofstream outputfile("rates.csv"); for (typename LevelSetType::const_iterator_runs it(SurfaceLevelSet); !it.is_finished(); it.next()) { if(it.active_pt_id() != LevelSetType::INACTIVE) { for (int j=0;j<D;j++) outputfile << (it.start_indices()[j]) << " "; outputfile << Rates[it.active_pt_id()] << std::endl; } } outputfile.close(); } { std::ofstream outputfile("rates_griddelta.csv"); for (typename LevelSetType::const_iterator_runs it(SurfaceLevelSet); !it.is_finished(); it.next()) { if(it.active_pt_id() != LevelSetType::INACTIVE) { for (int j=0;j<D;j++) outputfile << (it.start_indices()[j])*Parameter.grid_delta << " "; outputfile << Rates[it.active_pt_id()] << std::endl; } } outputfile.close(); } } } //local_time=my::time::GetTime()-StartTime; } // template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages, const ModelType& Model) { template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages, const ModelType& Model, double &time_step) {//, double &current_time) { double* c=&Coverages[0]; const double* r=&Rates[0]; while (r!=&(*(Rates.end()))) { Model.UpdateCoverage(c, r, time_step);//, current_time); // //std::cout << "time_step = " << time_step << "\n"; // else Model.UpdateCoverage(c, r); c+=Model.CoverageStorageSize; r+=Model.RatesStorageSize; } } template<class ModelType> void UpdateCoverages(const std::vector<double>& Rates, std::vector<double>& Coverages, const ModelType& Model) { double* c=&Coverages[0]; const double* r=&Rates[0]; while (r!=&(*(Rates.end()))) { // if (time_step != 0) Model.UpdateCoverage(c, r, time_step); Model.UpdateCoverage(c, r); 
c+=Model.CoverageStorageSize; r+=Model.RatesStorageSize; } } } #endif //DEF_NARROWBAND_H
omp_ex_11.c
#include <stdio.h>
#include <omp.h>

/* MIT License

Copyright (c) 2019 NOUREDDINE DAGHBOUDJ

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

/*
 * Each thread adds (10 + its thread id) to the shared counter `a` and prints
 * the running value.
 *
 * BUG FIX (review): the original performed the read-modify-write of the
 * shared variable `a` concurrently from every thread with no
 * synchronization — a data race, which is undefined behavior under the
 * OpenMP memory model (updates could be lost and printed values could be
 * inconsistent).  The update and its matching printf are now serialized in
 * a critical section.
 *
 * Also fixed: `a` is `unsigned int`, so it must be printed with %u; the
 * original's %i is a printf format/argument mismatch (undefined behavior).
 */
int main() {
    unsigned int a = 90;

    #pragma omp parallel
    {
        /* Serialize the shared update together with its printout so each
         * printed value reflects exactly one atomic step of the sum. */
        #pragma omp critical
        {
            a += 10 + omp_get_thread_num();
            printf("a=%u\n", a);
        }
    }

    return 0;
}
GB_binop__lt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_01__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__lt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_uint16) // A*D function (colscale): GB (_AxD__lt_uint16) // D*A function (rowscale): GB (_DxB__lt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__lt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__lt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_uint16) // C=scalar+B GB (_bind1st__lt_uint16) // C=scalar+B' GB (_bind1st_tran__lt_uint16) // C=A+scalar GB (_bind2nd__lt_uint16) // C=A'+scalar GB (_bind2nd_tran__lt_uint16) // C type: bool // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
main.c
#include <stdio.h>
#include <omp.h>

/*
 * 1-D linear convection solved with a first-order upwind scheme,
 * parallelized with OpenMP.
 *
 * Domain: x in [0, 2] with 101 grid points.  Initial condition is a square
 * wave (u = 2 on 0.5 < x < 1.0, u = 1 elsewhere), advected at speed c = 1.
 *
 * BUG FIX (review): the first worksharing loop inside the time-marching
 * region carried a `nowait` clause.  That loop READS u[] while the loop
 * immediately after it WRITES u[]; removing the implicit barrier allowed a
 * fast thread to overwrite u[i] in the copy loop while a slower thread was
 * still reading it in the stencil loop — a data race that can corrupt the
 * solution.  The `nowait` has been removed so every u_new[i] is computed
 * from the old u[] before u[] is refreshed.
 */
int main() {
    /* --- spatial grid --- */
    float x_len = 2.0;                    /* length of the domain           */
    int x_points = 101;                   /* number of grid points          */
    float del_x = x_len / (x_points - 1); /* grid spacing                   */
    float x[x_points];

    #pragma omp parallel for
    for (int i = 0; i < x_points; i++) {
        x[i] = i * del_x;                 /* x coordinates                  */
    }

    printf("\n The value of x \n");
    for (int i = 0; i < x_points; i++) {
        printf("%f \t", x[i]);
    }

    /* --- time-stepping parameters --- */
    int t_itrs = 2500;                    /* number of time iterations      */
    float del_t = 0.001;                  /* time step (CFL number = 0.05)  */
    float c = 1.0;                        /* wave speed                     */

    float u[x_points];                    /* velocity at current time       */
    float u_new[x_points];                /* velocity at next time level    */

    /* square-wave initial condition */
    for (int i = 0; i < x_points; i++) {
        if (x[i] > 0.5 && x[i] < 1.0) {
            u[i] = 2.0;
            u_new[i] = 2.0;
        } else {
            u[i] = 1.0;
            u_new[i] = 1.0;
        }
    }

    printf("\n The initial value of u is \n");
    for (int i = 0; i < x_points; i++) {
        printf("%f \t", u[i]);
    }

    /* --- time-marching loop ---
     * All threads execute the time loop together; the two `omp for`
     * worksharing constructs split the spatial work.  The implicit barrier
     * after the first loop (do NOT add `nowait`) guarantees the whole
     * u_new[] update is finished before u[] is overwritten, and the barrier
     * after the second loop keeps the time steps in lockstep. */
    #pragma omp parallel
    for (int it = 0; it < t_itrs; it++) {
        #pragma omp for
        for (int i = 1; i < x_points; i++) {
            u_new[i] = u[i] - (c * del_t / del_x) * (u[i] - u[i - 1]);
        }
        /* u[0] keeps its inflow boundary value (u_new[0] is never
         * recomputed); copy the advanced solution forward. */
        #pragma omp for
        for (int i = 0; i < x_points; i++) {
            u[i] = u_new[i];
        }
    }

    printf("\n The value of u at the end of the iterations \n");
    for (int i = 0; i < x_points; i++) {
        printf("%f \t", u[i]);
    }
}
bst.cpp.pluto.c
#include <omp.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Optimal-BST-style O(n^3) interval dynamic program over a weight matrix w:
 *   w[i][i+j] = min_k ( w[i][k-1] + w[k+1][i+j] ) + sum(p[i..i+j])
 * run in four loop-nest variants (reference code plus tiled/parallel code
 * that was presumably emitted by polyhedral tools such as Pluto -- TODO
 * confirm against the generator).  Each variant times itself, appends the
 * timing to results.txt and dumps the final matrix with printMatrix().
 *
 * NOTE(review): despite the .c-style content, write_results() is overloaded,
 * so this translation unit must be compiled as C++.
 */

// Pluto helper macros: true floor/ceil division for possibly-negative bounds.
#define min(x,y)    ((x) < (y) ? (x) : (y))
#define max(x,y)    ((x) > (y) ? (x) : (y))
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))

// NOTE(review): the three macros below are unused leftovers (likely from the
// code generator's statement templates) -- S0 even references an undeclared
// matrix 'c'.  Harmless while unexpanded.
#define S0(a, i, j, k) c[i][j] = c[i][k] + c[k][j]
#define match(b1, b2) (((b1)+(b2)) == 3 ? 1 : 0)
#define max_score(s1, s2) ((s1 >= s2) ? s1 : s2)

void printMatrix(int**, int, int);
int** allocateMatrix(int);
void deallocateMatrix(int**, int);
void write_results(int , double , char );
void write_results(int , double );

/*
 * Variant 0: Pluto-tiled wavefront (19x25 tiles), OpenMP-parallel over the
 * tile index t2 within each wavefront t1.  p: weight vector of length n.
 * Allocates, fills and frees its own (n+1)x(n+1) working matrix.
 */
void computeSEQ0(int* p, int n) {
    int** w = allocateMatrix(n + 1);
    int i, j, k, m, h, optimal_w; // m, h unused (generator leftovers)
    // Initialization: zero everything, seed the length-1 intervals with p[i].
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) w[i][j] = 0;
        if (i<n-1) { w[i][i+1] = p[i]; }
    }
    // "Infinity" for all strictly-upper entries before the min-reduction.
    for (i = 0; i < n; i++)
        for (j = i+1; j < n; j++)
            w[i][j] = 99999;
    double start = omp_get_wtime();
    int t1, t2, t3, t4, t5, t6;
    int lb, ub, lbp, ubp, lb2, ub2; // unused generator scaffolding
    register int lbv, ubv;          // NOTE(review): 'register' is deprecated in C++17
    if (n >= 3) {
        // t1: wavefront over tiles; t3: interval length; t4: interval start.
        for (t1=0;t1<=floord(44*n-69,475);t1++) {
            lbp=max(ceild(19*t1-n+2,19),ceild(19*t1-23,44));
            ubp=min(floord(n-1,25),t1);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6)
            for (t2=lbp;t2<=ubp;t2++) {
                for (t3=max(1,19*t1-19*t2);t3<=min(min(n-2,25*t2+23),19*t1-19*t2+18);t3++) {
                    for (t4=max(25*t2,t3+1);t4<=min(n-1,25*t2+24);t4++) {
                        // min over split points t6 for interval [t4-t3, t4].
                        // NOTE(review): when t6 == -t3+t4 == 0 this reads
                        // w[0][-1] -- out of bounds; same latent issue exists
                        // in every variant below.  TODO confirm intended
                        // w[i][i-1] == 0 convention and guard it.
                        for (t6=-t3+t4;t6<=t4;t6++) {
                            w[(-t3+t4)][(-t3+t4)+t3] = min(w[(-t3+t4)][t6-1] + w[t6+1][(-t3+t4)+t3], w[(-t3+t4)][(-t3+t4)+t3]);;
                        }
                        // add the interval's weight sum.
                        for (t6=-t3+t4;t6<=t4;t6++) {
                            w[(-t3+t4)][(-t3+t4)+t3] += p[t6];;
                        }
                    }
                }
            }
        }
    }
    double execution_time = omp_get_wtime() - start;
    printf("normal: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(w, n, 0);
    deallocateMatrix(w, n + 1);
}

/*
 * Variant 1 ("pluto"): per diagonal t1, the min-reduction is done
 * sequentially, then the weight accumulation is tiled (19x25) and
 * parallelized across t3.
 */
void computeSEQ0Pluto(int* p, int n) {
    int** w = allocateMatrix(n + 1);
    int i, j, k, m, h, optimal_w;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) w[i][j] = 0;
        if (i<n-1) { w[i][i+1] = p[i]; }
    }
    for (i = 0; i < n; i++)
        for (j = i+1; j < n; j++)
            w[i][j] = 99999;
    double start = omp_get_wtime();
    int t1, t2, t3, t4, t5, t6, t7, t8, t9;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    if (n >= 2) {
        for (t1=1;t1<=n-1;t1++) {
            // Sequential min-reduction over split points for diagonal t1.
            for (t3=0;t3<=-t1+n-1;t3++) {
                for (t5=t3;t5<=t1+t3;t5++) {
                    optimal_w = w[t3][t5-1] + w[t5+1][t3+t1];;
                    w[t3][t3+t1] = min(optimal_w, w[t3][t3+t1]);;
                }
            }
            lbp=0;
            ubp=floord(-t1+n-1,19);
            // Parallel, tiled accumulation of sum(p[t6..t6+t1]).
#pragma omp parallel for private(lbv,ubv,t4,t5,t6,t7,t8,t9)
            for (t3=lbp;t3<=ubp;t3++) {
                for (t5=ceild(19*t3-24,25);t5<=min(floord(n-1,25),floord(t1+19*t3+18,25));t5++) {
                    for (t6=max(19*t3,-t1+25*t5);t6<=min(min(19*t3+18,25*t5+24),-t1+n-1);t6++) {
                        for (t8=max(25*t5,t6);t8<=min(t1+t6,25*t5+24);t8++) {
                            w[t6][t6+t1] += p[t8];;
                        }
                    }
                }
            }
        }
    }
    double execution_time = omp_get_wtime() - start;
    printf("pluto: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(w, n, 0);
    deallocateMatrix(w, n + 1);
}

/*
 * Variant 2: plain sequential reference implementation of the same DP.
 * NOTE(review): the "paralell" label below is a typo in a runtime string;
 * left untouched in this documentation pass.
 */
void computeSEQ1(int* p, int n) {
    int** w = allocateMatrix(n + 1);
    int i, j, k, m, h, optimal_w;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) w[i][j] = 0;
        if (i<n-1) { w[i][i+1] = p[i]; }
    }
    for (i = 0; i < n; i++)
        for (j = i+1; j < n; j++)
            w[i][j] = 99999;
    double start = omp_get_wtime();
    // j: interval length; i: interval start; k: split point.
    for (j = 1; j < n; j++)
        for (i=0; i < n-j; i++){
            for (k = i; k <= i+j; k++){
                // NOTE(review): k == i == 0 reads w[0][-1] (out of bounds).
                optimal_w = w[i][k-1] + w[k+1][i+j];
                if (optimal_w < w[i][i+j]){
                    w[i][i+j] = optimal_w;
                }
            }
            // accumulate sum(p[i..i+j]) via the for-update clause.
            for (k = i; k <= i+j; w[i][i+j] += p[k++])
                ;
        }
    double execution_time = omp_get_wtime() - start;
    printf("paralell: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(w, n, 0);
    deallocateMatrix(w, n + 1);
}

/*
 * Variant 3 ("stencil"): 16x16 time-skewed tiling with c0..c7 iterator
 * names, presumably emitted by an isl/PPCG-style generator -- TODO confirm.
 */
void computeSEQ2(int* p, int n) {
    int** w = allocateMatrix(n + 1);
    int i, j, k, m, h, optimal_w;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) w[i][j] = 0;
        if (i<n-1) { w[i][i+1] = p[i]; }
    }
    for (i = 0; i < n; i++)
        for (j = i+1; j < n; j++)
            w[i][j] = 99999;
    double start = omp_get_wtime();
    for (int c0 = floord(-n + 1, 16) + 2; c0 <= floord(n - 3, 16) + 1; c0 += 1) {
#pragma omp parallel for
        for (int c1 = -c0 - (n - 16 * c0 + 46) / 32 + 1; c1 <= min(-1, -c0); c1 += 1) {
            for (int c4 = -16 * c1 - 15; c4 <= min(min(n - 2, n + 16 * c0 + 16 * c1 - 1), -16 * c1); c4 += 1) {
                for (int c5 = max(1, -16 * c0 - 16 * c1); c5 <= min(-16 * c0 - 16 * c1 + 15, n - c4 - 1); c5 += 1) {
                    // min over split points c7 for interval [c5, c5+c4].
                    for (int c7 = c5; c7 <= c4 + c5; c7 += 1) {
                        w[c5][c4 + c5] = (((w[c5][c7 - 1] + w[c7 + 1][c4 + c5]) < w[c5][c4 + c5]) ? (w[c5][c7 - 1] + w[c7 + 1][c4 + c5]) : w[c5][c4 + c5]);
                    }
                    // weight-sum accumulation.
                    for (int c7 = c5; c7 <= c4 + c5; c7 += 1) {
                        w[c5][c4 + c5] += p[c7];
                    }
                }
            }
        }
        // empty guard emitted by the generator; intentionally a no-op.
        if (c0 == 0) {
        }
    }
    double execution_time = omp_get_wtime() - start;
    printf("stencil: %lf\n", execution_time);
    write_results(n, execution_time);
    printMatrix(w, n, 0);
    deallocateMatrix(w, n + 1);
}

/*
 * Dump the top-left NxN corner of 'matrix' to a file named "nontiled<fileno>".
 * NOTE(review): filename[10] exactly fits "nontiled" + one digit + NUL, so
 * fileno must stay in 0..9 (all callers pass 0, so every variant overwrites
 * the same file).  fopen's result is not checked.
 */
void printMatrix(int** matrix, int N, int fileno) {
    char filename[10];
    sprintf(filename, "nontiled%d", fileno);
    FILE* f = fopen(filename, "wt");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            fprintf(f, "%d ", matrix[i][j]);
            //printf("%4d ", matrix[i][j]);
        }
        fprintf(f, "\n");
        // printf("\n");
    }
    fclose(f);
}

// Allocate an NxN row-pointer matrix.  NOTE(review): malloc results unchecked.
int** allocateMatrix(int N) {
    int** t = (int**)malloc(sizeof(int*) * N);
    for (int i = 0; i < N; i++) {
        t[i] = (int*)malloc(sizeof(int) * N);
    }
    return t;
}

// Allocate an int vector of length N (caller frees).
int* allocateVector(int N) {
    int* t = (int*)malloc(sizeof(int) * N);
    return t;
}

// Free a matrix previously produced by allocateMatrix(N).
void deallocateMatrix(int **t, int N) {
    for (int i = 0; i < N; i++) {
        free(t[i]);
    }
    free(t);
}

// Append "n:time<end_char>" to results.txt (C++ overload #1).
void write_results(int n, double execution_time, char end_char) {
    FILE* f = fopen("results.txt", "at");
    fprintf(f, "%d:%lf%c", n, execution_time, end_char);
    fclose(f);
}

// Overload #2: default separator ';'.
void write_results(int n, double execution_time) {
    write_results(n, execution_time, ';');
}

// Driver: run every variant once on seq = {0..ZMAX-1}; the while loop is
// written to allow sweeping N but currently executes a single size.
int main(void) {//vector
    const int ZMAX = 7;
    int* seq = allocateVector(ZMAX);
    for (int i = 0; i < ZMAX; i++)
        seq[i] = i;
    int N = ZMAX;
    while (N <= ZMAX) {
        computeSEQ0(seq, N);
        computeSEQ0Pluto(seq, N);
        computeSEQ1(seq, N);
        computeSEQ2(seq, N);
        N += 10;
    }
    free(seq);
    return 0;
}
_quadratic_ld.c
/* The batman package: fast computation of exoplanet transit light curves * Copyright (C) 2015 Laura Kreidberg * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> #include "numpy/arrayobject.h" #if defined (_OPENACC) && defined(__PGI) # include <accelmath.h> #else # include <math.h> #endif #if defined (_OPENMP) && !defined(_OPENACC) # include <omp.h> #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #define MIN(x, y) (((x) < (y)) ? (x) : (y)) #define MAX(x, y) (((x) > (y)) ? 
(x) : (y)) inline double ellpic_bulirsch(double n, double k); inline double ellec(double k); inline double ellk(double k); static PyObject *_quadratic_ld(PyObject *self, PyObject *args); static PyObject *_quadratic_ld(PyObject *self, PyObject *args) { /* Input: ************************************* ds array of impact parameters in units of rs c1 linear limb-darkening coefficient (gamma_1 in Mandel & Agol 2002) c2 quadratic limb-darkening coefficient (gamma_2) p occulting star size in units of rs Output: *********************************** flux fraction of flux at each ds for a limb-darkened source Limb darkening has the form: I(r) = [1 - c1 * (1 - sqrt(1 - (r/rs)^2)) - c2*(1 - sqrt(1 - (r/rs)^2))^2]/(1 - c1/3 - c2/6)/pi */ const int nthreads; const double c1, c2, p; PyArrayObject *ds, *flux; npy_intp dims[1]; if(!PyArg_ParseTuple(args,"Odddi", &ds, &p, &c1, &c2, &nthreads)) return NULL; dims[0] = PyArray_DIMS(ds)[0]; flux = (PyArrayObject *) PyArray_SimpleNew(1, dims, PyArray_TYPE(ds)); //creates numpy array to store return flux values double *f_array = PyArray_DATA(flux); double *d_array = PyArray_DATA(ds); /* NOTE: the safest way to access numpy arrays is to use the PyArray_GETITEM and PyArray_SETITEM functions. Here we use a trick for faster access and more convenient access, where we set a pointer to the beginning of the array with the PyArray_DATA (e.g., f_array) and access elements with e.g., f_array[i]. Success of this operation depends on the numpy array storing data in blocks equal in size to a C double. If you run into trouble along these lines, I recommend changing the array access to something like: d = PyFloat_AsDouble(PyArray_GETITEM(ds, PyArray_GetPtr(ds, &i))); where ds is a numpy array object. 
Laura Kreidberg 07/2015 */ const double omega = 1.0 - c1/3.0 - c2/6.0; // double precision equality tolerance for corner case issues const double tol = 1.0e-14; #if defined (_OPENMP) && !defined(_OPENACC) omp_set_num_threads(nthreads); //specifies number of threads (if OpenMP is supported) #endif #if defined (_OPENACC) #pragma acc parallel loop copyin(d_array[:dims[0]]) copyout(f_array[:dims[0]]) #elif defined (_OPENMP) #pragma omp parallel for #endif for (int i = 0; i < dims[0]; i++) { double kap0 = 0.0, kap1 = 0.0; double lambdad, lambdae, etad; double d = d_array[i]; // allow for negative impact parameters d = fabs(d); // check the corner cases if(fabs(p - d) < tol) { d = p; } if(fabs(p - 1.0 - d) < tol) { d = p - 1.0; } if(fabs(1.0 - p - d) < tol) { d = 1.0 - p; } if(d < tol) { d = 0.0; } double x1 = pow((p - d), 2.0); double x2 = pow((p + d), 2.0); double x3 = p*p - d*d; //source is unocculted: if(d >= 1.0 + p) { //printf("zone 1\n"); f_array[i] = 1.0; continue; } //source is completely occulted: if(p >= 1.0 && d <= p - 1.0) { //printf("zone 2\n"); lambdad = 0.0; etad = 0.5; //error in Fortran code corrected here, following Jason Eastman's python code lambdae = 1.0; f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 2.0*c2)*(lambdad + 2.0/3.0) + c2*etad)/omega; continue; } //source is partly occulted and occulting object crosses the limb: if(d >= fabs(1.0 - p) && d <= 1.0 + p) { //printf("zone 3\n"); kap1 = acos(MIN((1.0 - p*p + d*d)/2.0/d, 1.0)); kap0 = acos(MIN((p*p + d*d - 1.0)/2.0/p/d, 1.0)); lambdae = p*p*kap0 + kap1; lambdae = (lambdae - 0.50*sqrt(MAX(4.0*d*d - pow((1.0 + d*d - p*p), 2.0), 0.0)))/M_PI; } //edge of the occulting star lies at the origin if(d == p) { //printf("zone 5\n"); if(d < 0.5) { //printf("zone 5.2\n"); double q = 2.0*p; double Kk = ellk(q); double Ek = ellec(q); lambdad = 1.0/3.0 + 2.0/9.0/M_PI*(4.0*(2.0*p*p - 1.0)*Ek + (1.0 - 4.0*p*p)*Kk); etad = p*p/2.0*(p*p + 2.0*d*d); f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 
2.0*c2)*lambdad + c2*etad)/omega; continue; } else if(d > 0.5) { //printf("zone 5.1\n"); double q = 0.5/p; double Kk = ellk(q); double Ek = ellec(q); lambdad = 1.0/3.0 + 16.0*p/9.0/M_PI*(2.0*p*p - 1.0)*Ek - \ (32.0*pow(p, 4.0) - 20.0*p*p + 3.0)/9.0/M_PI/p*Kk; etad = 1.0/2.0/M_PI*(kap1 + p*p*(p*p + 2.0*d*d)*kap0 - \ (1.0 + 5.0*p*p + d*d)/4.0*sqrt((1.0 - x1)*(x2 - 1.0))); // continue; } else { //printf("zone 6\n"); lambdad = 1.0/3.0 - 4.0/M_PI/9.0; etad = 3.0/32.0; f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 2.0*c2)*lambdad + c2*etad)/omega; continue; } f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 2.0*c2)*lambdad + c2*etad)/omega; continue; } //occulting star partly occults the source and crosses the limb: //if((d > 0.5 + fabs(p - 0.5) && d < 1.0 + p) || (p > 0.5 && d > fabs(1.0 - p)*1.0001 \ //&& d < p)) //the factor of 1.0001 is from the Mandel/Agol Fortran routine, but gave bad output for d near fabs(1-p) if((d > 0.5 + fabs(p - 0.5) && d < 1.0 + p) || (p > 0.5 && d > fabs(1.0 - p) \ && d < p)) { //printf("zone 3.1\n"); double q = sqrt((1.0 - x1)/4.0/d/p); double Kk = ellk(q); double Ek = ellec(q); double n = 1.0/x1 - 1.0; double Pk = ellpic_bulirsch(n, q); lambdad = 1.0/9.0/M_PI/sqrt(p*d)*(((1.0 - x2)*(2.0*x2 + \ x1 - 3.0) - 3.0*x3*(x2 - 2.0))*Kk + 4.0*p*d*(d*d + \ 7.0*p*p - 4.0)*Ek - 3.0*x3/x1*Pk); if(d < p) lambdad += 2.0/3.0; etad = 1.0/2.0/M_PI*(kap1 + p*p*(p*p + 2.0*d*d)*kap0 - \ (1.0 + 5.0*p*p + d*d)/4.0*sqrt((1.0 - x1)*(x2 - 1.0))); f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 2.0*c2)*lambdad + c2*etad)/omega; continue; } //occulting star transits the source: if(p <= 1.0 && d <= (1.0 - p)) { etad = p*p/2.0*(p*p + 2.0*d*d); lambdae = p*p; //printf("zone 4.1\n"); double q = sqrt((x2 - x1)/(1.0 - x1)); double Kk = ellk(q); double Ek = ellec(q); double n = x2/x1 - 1.0; double Pk = ellpic_bulirsch(n, q); lambdad = 2.0/9.0/M_PI/sqrt(1.0 - x1)*((1.0 - 5.0*d*d + p*p + \ x3*x3)*Kk + (1.0 - x1)*(d*d + 7.0*p*p - 4.0)*Ek - 
3.0*x3/x1*Pk); // edge of planet hits edge of star if(fabs(p + d - 1.0) <= tol) { lambdad = 2.0/3.0/M_PI*acos(1.0 - 2.0*p) - 4.0/9.0/M_PI* \ sqrt(p*(1.0 - p))*(3.0 + 2.0*p - 8.0*p*p); } if(d < p) lambdad += 2.0/3.0; } f_array[i] = 1.0 - ((1.0 - c1 - 2.0*c2)*lambdae + (c1 + 2.0*c2)*lambdad + c2*etad)/omega; } return PyArray_Return((PyArrayObject *)flux); } /* Computes the complete elliptical integral of the third kind using the algorithm of Bulirsch (1965): Bulirsch 1965, Numerische Mathematik, 7, 78 Bulirsch 1965, Numerische Mathematik, 7, 353 INPUTS: n,k - int(dtheta/((1-n*sin(theta)^2)*sqrt(1-k^2*sin(theta)^2)),0, pi/2) RESULT: The complete elliptical integral of the third kind -- translated from the ellpic_bulirsch.pro routine from EXOFAST (Eastman et al. 2013, PASP 125, 83) by Laura Kreidberg (7/22/15) */ inline double ellpic_bulirsch(double n, double k) { double kc = sqrt(1.-k*k); double p = sqrt(n + 1.); double m0 = 1.; double c = 1.; double d = 1./p; double e = kc; double f, g; int nit = 0; while(nit < 10000) { f = c; c = d/p + c; g = e/p; d = 2.*(f*g + d); p = g + p; g = m0; m0 = kc + m0; if(fabs(1.-kc/g) > 1.0e-8) { kc = 2.*sqrt(e); e = kc*m0; } else { return 0.5*M_PI*(c*m0+d)/(m0*(m0+p)); } nit++; } #ifndef _OPENACC printf("Convergence failure in ellpic_bulirsch\n"); #endif return 0; } inline double ellec(double k) { double m1, a1, a2, a3, a4, b1, b2, b3, b4, ee1, ee2, ellec; // Computes polynomial approximation for the complete elliptic // integral of the first kind (Hasting's approximation): m1 = 1.0 - k*k; a1 = 0.44325141463; a2 = 0.06260601220; a3 = 0.04757383546; a4 = 0.01736506451; b1 = 0.24998368310; b2 = 0.09200180037; b3 = 0.04069697526; b4 = 0.00526449639; ee1 = 1.0 + m1*(a1 + m1*(a2 + m1*(a3 + m1*a4))); ee2 = m1*(b1 + m1*(b2 + m1*(b3 + m1*b4)))*log(1.0/m1); ellec = ee1 + ee2; return ellec; } inline double ellk(double k) { double a0, a1, a2, a3, a4, b0, b1, b2, b3, b4, ellk, ek1, ek2, m1; // Computes polynomial approximation for the complete 
elliptic // integral of the second kind (Hasting's approximation): m1 = 1.0 - k*k; a0 = 1.38629436112; a1 = 0.09666344259; a2 = 0.03590092383; a3 = 0.03742563713; a4 = 0.01451196212; b0 = 0.5; b1 = 0.12498593597; b2 = 0.06880248576; b3 = 0.03328355346; b4 = 0.00441787012; ek1 = a0 + m1*(a1 + m1*(a2 + m1*(a3 + m1*a4))); ek2 = (b0 + m1*(b1 + m1*(b2 + m1*(b3 + m1*b4))))*log(m1); ellk = ek1 - ek2; return ellk; } static char _quadratic_ld_doc[] = "This extension module returns a limb darkened light curve for a quadratic stellar intensity profile."; static PyMethodDef _quadratic_ld_methods[] = { {"_quadratic_ld", _quadratic_ld, METH_VARARGS, _quadratic_ld_doc},{NULL}}; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef _quadratic_ld_module = { PyModuleDef_HEAD_INIT, "_quadratic_ld", _quadratic_ld_doc, -1, _quadratic_ld_methods }; PyMODINIT_FUNC PyInit__quadratic_ld(void) { PyObject* module = PyModule_Create(&_quadratic_ld_module); if(!module) { return NULL; } import_array(); return module; } #else void init_quadratic_ld(void) { Py_InitModule("_quadratic_ld", _quadratic_ld_methods); import_array(); } #endif
matvec_float.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>

#define N_RUNS 20
#define N 10240

/* Wall-clock timer in seconds (millisecond resolution).
 * NOTE(review): ftime()/<sys/timeb.h> is obsolete (removed from POSIX.1-2008);
 * kept here to preserve behavior, but clock_gettime(CLOCK_MONOTONIC) would be
 * the modern replacement. */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* Fill the NxN row-major matrix and the length-N vector with uniform
 * pseudo-random floats in roughly [0, 10). */
void init(float *matrix, float *vector) {
    for (int i = 0; i<N; i++) {
        for (int j = 0; j<N; j++) {
            matrix[i*N+j] = (float)rand()/(float)(RAND_MAX/10.0);
        }
        vector[i] = (float)rand()/(float)(RAND_MAX/10.0);
    }
}

/* dest = matrix * vector, with the inner dot product vectorized via
 * OpenMP SIMD (tmp is a privatized reduction accumulator). */
void matvec_simd(float *matrix, float *vector, float *dest) {
    for (int i = 0; i<N; i++) {
        float tmp = 0;
        #pragma omp simd reduction(+: tmp)
        for (int j = 0; j<N; j++) {
            tmp += matrix[i*N+j] * vector[j];
        }
        dest[i] = tmp;
    }
}

// Debug functions

/* Scalar reference implementation of dest = matrix * vector, used to
 * validate matvec_simd. */
void matvec_serial(float *matrix, float *vector, float *dest) {
    for (int i = 0; i<N; i++) {
        float tmp = 0;
        for (int j = 0; j<N; j++) {
            tmp += matrix[i*N+j] * vector[j];
        }
        dest[i] = tmp;
    }
}

/* Print the top-left 8x8 corner of the matrix (debug aid). */
void print_matrix(float *matrix) {
    for (int i = 0; i<8; i++) {
        printf("[");
        for (int j = 0; j<8; j++) {
            printf("%.2f ", matrix[i*N+j]);
        }
        puts("]");
    }
    puts("");
}

/* Print the first 8 elements of a vector (debug aid). */
void print_vector(float *vector) {
    printf("[");
    for (int i = 0; i<8; i++) {
        printf("%.2f ", vector[i]);
    }
    puts("]");
}

/* Sum of absolute element-wise differences between two length-N vectors;
 * ~0 means the SIMD and serial results agree. */
float check(float *A, float *B){
    float difference = 0;
    for(int i = 0;i<N; i++){
        difference += fabsf(A[i]- B[i]);
    }
    return difference;
}

/* Benchmark driver: times N_RUNS SIMD and serial matrix-vector products,
 * reports runtime/GFLOPS and a correctness check.  Returns 0 on success,
 * EXIT_FAILURE if allocation fails. */
int main(int argc, char **argv) {
    // Set everything up.
    // BUG FIX: the original allocations used sizeof(float*) as the element
    // size, silently requesting 2x the needed memory on LP64 systems
    // (~800 MB instead of ~400 MB for the matrix).  The idiomatic
    // 'n * sizeof *ptr' form keeps the element size tied to the pointee type.
    float *dest_vector = malloc(N * sizeof *dest_vector);
    float *serial_vector = malloc(N * sizeof *serial_vector);
    float *matrix = malloc((size_t)N * N * sizeof *matrix);
    float *vector = malloc(N * sizeof *vector);
    // An allocation of this size can legitimately fail; check before use
    // (free(NULL) is a no-op, so unconditional cleanup is safe).
    if (!dest_vector || !serial_vector || !matrix || !vector) {
        fprintf(stderr, "matvec_float: memory allocation failed\n");
        free(dest_vector);
        free(serial_vector);
        free(matrix);
        free(vector);
        return EXIT_FAILURE;
    }

    srand(time(NULL));
    init(matrix, vector);

    // warming up (touch pages, warm caches, trigger any lazy JIT/turbo)
    matvec_simd(matrix, vector, dest_vector);

    double t = 0;
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        matvec_simd(matrix, vector, dest_vector);
    t += (read_timer() - start);

    double t_serial = 0;
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        matvec_serial(matrix, vector, serial_vector);
    t_serial += (read_timer() - start_serial);

    print_matrix(matrix);
    print_vector(vector);
    puts("=\n");
    print_vector(dest_vector);
    puts("---------------------------------");
    print_vector(serial_vector);

    // 2*N flops per dot product, N dot products per run.
    double gflops = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t_serial);

    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Matrix-vector (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
    printf("Matrix-vector (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
    printf("Correctness check: %f\n", check(dest_vector,serial_vector));

    free(dest_vector);
    free(serial_vector);
    free(matrix);
    free(vector);
    return 0;
}
GB_binop__bor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_03__bor_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int8) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8) // C=scalar+B GB (_bind1st__bor_int8) // C=scalar+B' GB (_bind1st_tran__bor_int8) // C=A+scalar GB (_bind2nd__bor_int8) // C=A'+scalar GB (_bind2nd_tran__bor_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t 
aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, 
const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bor_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t 
*restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__identity_int32_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int32_fp64 // op(A') function: GB_tran__identity_int32_fp64 // C type: int32_t // A type: double // cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int32_t z ; GB_CAST_SIGNED(z,aij,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int32_fp64 ( int32_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
coordinate_common.h
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #pragma once #include <algorithm> #include <string> #include <utility> #include <vector> #include <limits> #include "../common/random.h" namespace xgboost { namespace linear { /** * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the * number of training instances. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * \param w The weight. * \param reg_alpha Unnormalised L1 penalty. * \param reg_lambda Unnormalised L2 penalty. * * \return The weight update. */ inline double CoordinateDelta(double sum_grad, double sum_hess, double w, double reg_alpha, double reg_lambda) { if (sum_hess < 1e-5f) return 0.0f; const double sum_grad_l2 = sum_grad + reg_lambda * w; const double sum_hess_l2 = sum_hess + reg_lambda; const double tmp = w - sum_grad_l2 / sum_hess_l2; if (tmp >= 0) { return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w); } else { return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w); } } /** * \brief Calculate update to bias. * * \param sum_grad The sum gradient. * \param sum_hess The sum hess. * * \return The weight update. */ inline double CoordinateDeltaBias(double sum_grad, double sum_hess) { return -sum_grad / sum_hess; } /** * \brief Get the gradient with respect to a single feature. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. 
*/ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto &batch = iter->Value(); auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.length); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to a single feature. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto &batch = iter->Value(); auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to the bias. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for the bias. 
*/ inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { const RowSet &rowset = p_fmat->BufferedRowset(); double sum_grad = 0.0, sum_hess = 0.0; const auto ndata = static_cast<bst_omp_uint>(rowset.Size()); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint i = 0; i < ndata; ++i) { auto &p = gpair[rowset[i] * num_group + group_idx]; if (p.GetHess() >= 0.0f) { sum_grad += p.GetGrad(); sum_hess += p.GetHess(); } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Updates the gradient vector with respect to a change in weight. * * \param fidx The feature index. * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dw The change in weight. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. */ inline void UpdateResidualParallel(int fidx, int group_idx, int num_group, float dw, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dw == 0.0f) return; auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto &batch = iter->Value(); auto col = batch[fidx]; // update grad value const auto num_row = static_cast<bst_omp_uint>(col.length); #pragma omp parallel for schedule(static) for (bst_omp_uint j = 0; j < num_row; ++j) { GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0); } } } /** * \brief Updates the gradient vector based on a change in the bias. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param dbias The change in bias. * \param in_gpair The gradient vector to be updated. * \param p_fmat The input feature matrix. 
*/ inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias, std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) { if (dbias == 0.0f) return; const RowSet &rowset = p_fmat->BufferedRowset(); const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < ndata; ++i) { GradientPair &g = (*in_gpair)[rowset[i] * num_group + group_idx]; if (g.GetHess() < 0.0f) continue; g += GradientPair(g.GetHess() * dbias, 0); } } /** * \brief Abstract class for stateful feature selection or ordering * in coordinate descent algorithms. */ class FeatureSelector { public: /*! \brief factory method */ static FeatureSelector *Create(int choice); /*! \brief virtual destructor */ virtual ~FeatureSelector() = default; /** * \brief Setting up the selector state prior to looping through features. * * \param model The model. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * \param param A parameter with algorithm-dependent use. */ virtual void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) {} /** * \brief Select next coordinate to update. * * \param iteration The iteration in a loop through features * \param model The model. * \param group_idx Zero-based index of the group. * \param gpair The gpair. * \param p_fmat The feature matrix. * \param alpha Regularisation alpha. * \param lambda Regularisation lambda. * * \return The index of the selected feature. -1 indicates none selected. */ virtual int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) = 0; }; /** * \brief Deterministic selection by cycling through features one at a time. 
*/ class CyclicFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return iteration % model.param.num_feature; } }; /** * \brief Similar to Cyclyc but with random feature shuffling prior to each update. * \note Its randomness is controllable by setting a random seed. */ class ShuffleFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { if (feat_index_.size() == 0) { feat_index_.resize(model.param.num_feature); std::iota(feat_index_.begin(), feat_index_.end(), 0); } std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom()); } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return feat_index_[iteration % model.param.num_feature]; } protected: std::vector<bst_uint> feat_index_; }; /** * \brief A random (with replacement) coordinate selector. * \note Its randomness is controllable by setting a random seed. */ class RandomFeatureSelector : public FeatureSelector { public: int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { return common::GlobalRandom()() % model.param.num_feature; } }; /** * \brief Select coordinate with the greatest gradient magnitude. * \note It has O(num_feature^2) complexity. It is fully deterministic. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). That would reduce the complexity to * O(num_feature*top_k). 
*/ class GreedyFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { top_k_ = static_cast<bst_uint>(param); const bst_uint ngroup = model.param.num_output_group; if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max(); if (counter_.size() == 0) { counter_.resize(ngroup); gpair_sums_.resize(model.param.num_feature * ngroup); } for (bst_uint gid = 0u; gid < ngroup; ++gid) { counter_[gid] = 0u; } } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { // k-th selected feature for a group auto k = counter_[group_idx]++; // stop after either reaching top-K or going through all the features in a group if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1; const int ngroup = model.param.num_output_group; const bst_omp_uint nfeat = model.param.num_feature; // Calculate univariate gradient sums std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.)); auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto &batch = iter->Value(); #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < nfeat; ++i) { const auto col = batch[i]; const bst_uint ndata = col.length; auto &sums = gpair_sums_[group_idx * nfeat + i]; for (bst_uint j = 0u; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * ngroup + group_idx]; if (p.GetHess() < 0.f) continue; sums.first += p.GetGrad() * v; sums.second += p.GetHess() * v * v; } } } // Find a feature with the largest magnitude of weight change int best_fidx = 0; double best_weight_update = 0.0f; for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) { auto &s = gpair_sums_[group_idx * nfeat + fidx]; float dw = std::abs(static_cast<bst_float>( CoordinateDelta(s.first, s.second, 
model[fidx][group_idx], alpha, lambda))); if (dw > best_weight_update) { best_weight_update = dw; best_fidx = fidx; } } return best_fidx; } protected: bst_uint top_k_; std::vector<bst_uint> counter_; std::vector<std::pair<double, double>> gpair_sums_; }; /** * \brief Thrifty, approximately-greedy feature selector. * * \note Prior to cyclic updates, reorders features in descending magnitude of * their univariate weight changes. This operation is multithreaded and is a * linear complexity approximation of the quadratic greedy selection. * * \note It allows restricting the selection to top_k features per group with * the largest magnitude of univariate weight change, by passing the top_k value * through the `param` argument of Setup(). */ class ThriftyFeatureSelector : public FeatureSelector { public: void Setup(const gbm::GBLinearModel &model, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda, int param) override { top_k_ = static_cast<bst_uint>(param); if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max(); const bst_uint ngroup = model.param.num_output_group; const bst_omp_uint nfeat = model.param.num_feature; if (deltaw_.size() == 0) { deltaw_.resize(nfeat * ngroup); sorted_idx_.resize(nfeat * ngroup); counter_.resize(ngroup); gpair_sums_.resize(nfeat * ngroup); } // Calculate univariate gradient sums std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.)); auto iter = p_fmat->ColIterator(); while (iter->Next()) { auto &batch = iter->Value(); // column-parallel is usually faster than row-parallel #pragma omp parallel for schedule(static) for (bst_omp_uint i = 0; i < nfeat; ++i) { const auto col = batch[i]; const bst_uint ndata = col.length; for (bst_uint gid = 0u; gid < ngroup; ++gid) { auto &sums = gpair_sums_[gid * nfeat + i]; for (bst_uint j = 0u; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * ngroup + gid]; if (p.GetHess() < 0.f) continue; sums.first += p.GetGrad() * 
v; sums.second += p.GetHess() * v * v; } } } } // rank by descending weight magnitude within the groups std::fill(deltaw_.begin(), deltaw_.end(), 0.f); std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0); bst_float *pdeltaw = &deltaw_[0]; for (bst_uint gid = 0u; gid < ngroup; ++gid) { // Calculate univariate weight changes for (bst_omp_uint i = 0; i < nfeat; ++i) { auto ii = gid * nfeat + i; auto &s = gpair_sums_[ii]; deltaw_[ii] = static_cast<bst_float>(CoordinateDelta( s.first, s.second, model[i][gid], alpha, lambda)); } // sort in descending order of deltaw abs values auto start = sorted_idx_.begin() + gid * nfeat; std::sort(start, start + nfeat, [pdeltaw](size_t i, size_t j) { return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j)); }); counter_[gid] = 0u; } } int NextFeature(int iteration, const gbm::GBLinearModel &model, int group_idx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat, float alpha, float lambda) override { // k-th selected feature for a group auto k = counter_[group_idx]++; // stop after either reaching top-N or going through all the features in a group if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1; // note that sorted_idx stores the "long" indices const size_t grp_offset = group_idx * model.param.num_feature; return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset); } protected: bst_uint top_k_; std::vector<bst_float> deltaw_; std::vector<size_t> sorted_idx_; std::vector<bst_uint> counter_; std::vector<std::pair<double, double>> gpair_sums_; }; /** * \brief A set of available FeatureSelector's */ enum FeatureSelectorEnum { kCyclic = 0, kShuffle, kThrifty, kGreedy, kRandom }; inline FeatureSelector *FeatureSelector::Create(int choice) { switch (choice) { case kCyclic: return new CyclicFeatureSelector(); case kShuffle: return new ShuffleFeatureSelector(); case kThrifty: return new ThriftyFeatureSelector(); case kGreedy: return new GreedyFeatureSelector(); case kRandom: return new 
RandomFeatureSelector(); default: LOG(FATAL) << "unknown coordinate selector: " << choice; } return nullptr; } } // namespace linear } // namespace xgboost
critical.c
/*
 * Minimal OpenMP demo: every thread in the team increments the shared
 * counter x inside a critical section, so the final value equals the
 * number of threads. Build with e.g. `cc -fopenmp critical.c`.
 */
#include <omp.h>
#include <stdio.h>

int main(void)
{
    int x = 0;

    /* The critical construct serialises the read-modify-write on x,
     * so no increment is lost to a data race. */
    #pragma omp parallel shared(x)
    {
        #pragma omp critical
        x = x + 1;
    } /* end of parallel section */

    printf("out of the parallel region : X = %d\n", x);
    return 0;
}
sort.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <assert.h> #include <math.h> #include <time.h> #include <HiParTI.h> #include "sptensor.h" static void pti_QuickSortIndex(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r); static void pti_QuickSortIndexRowBlock(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sk_bits); static void pti_QuickSortIndexMorton2D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits, ptiIndex * mode_order); static void pti_QuickSortIndexMorton3D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits); static void pti_QuickSortIndexMorton4D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits); static void pti_QuickSortIndexSingleMode(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex mode); static void pti_QuickSortIndexExceptSingleMode(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex * mode_order, ptiIndex * eleinds_buf); static void pti_QuickSortIndexExceptSingleModeRowBlock(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex * mode_order, ptiElementIndex sk_bits); static const uint32_t MASKS[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF}; static const uint32_t SHIFTS[] = {1, 2, 4, 8}; /* Mode order: X -> Y -> Z, x indices are sorted, y and z are Morton order sorted. 
*/ static const uint32_t morton256_z[256] = { 0x00000000, 0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200, 0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000, 0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200, 0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000, 0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200, 0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000, 0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200, 0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000, 0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200, 0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000, 0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200, 0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000, 0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200, 0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000, 0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200, 0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000, 0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200, 0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000, 0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200, 0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000, 0x00208001, 
0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200, 0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000, 0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200, 0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000, 0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200, 0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000, 0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200, 0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000, 0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200, 0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000, 0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200, 0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249 }; // pre-shifted table for Y coordinates (1 bit to the left) static const uint32_t morton256_y[256] = { 0x00000000, 0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400, 0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000, 0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400, 0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000, 0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400, 0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000, 0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400, 0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 
0x00080000, 0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400, 0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000, 0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400, 0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000, 0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400, 0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000, 0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400, 0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000, 0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400, 0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000, 0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400, 0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000, 0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400, 0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000, 0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400, 0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000, 0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400, 0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000, 0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400, 0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000, 0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 
0x00490090, 0x00490092, 0x00490400, 0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000, 0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400, 0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492 }; // Pre-shifted table for x (2 bits to the left) static const uint32_t morton256_x[256] = { 0x00000000, 0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800, 0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000, 0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800, 0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000, 0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800, 0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000, 0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800, 0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000, 0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800, 0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000, 0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800, 0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000, 0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800, 0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000, 0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800, 0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000, 0x00800004, 0x00800020, 0x00800024, 
0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800, 0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000, 0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800, 0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000, 0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800, 0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000, 0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800, 0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000, 0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800, 0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000, 0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800, 0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000, 0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800, 0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000, 0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800, 0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924 }; void pti_SwapValues(ptiSparseTensor *tsr, ptiNnzIndex ind1, ptiNnzIndex ind2) { for(ptiIndex i = 0; i < tsr->nmodes; ++i) { ptiIndex eleind1 = tsr->inds[i].data[ind1]; tsr->inds[i].data[ind1] = tsr->inds[i].data[ind2]; tsr->inds[i].data[ind2] = eleind1; } ptiValue val1 = tsr->values.data[ind1]; tsr->values.data[ind1] = tsr->values.data[ind2]; tsr->values.data[ind2] = val1; } /**************************** * Functions to determine mode order ****************************/ /** * Determine the best mode 
order. Sort order: [mode, (ordered by increasing dimension sizes)] * * @param[out] mode_order a pointer to the array to be filled, * @param[in] mode mode to do product * @param[in] ndims tensor dimension sizes * @param[in] nmodes tensor order * */ void ptiGetBestModeOrder( ptiIndex * mode_order, ptiIndex const mode, ptiIndex const * ndims, ptiIndex const nmodes) { ptiKeyValuePair * sorted_ndims = (ptiKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims)); for(ptiIndex m=0; m<nmodes; ++m) { sorted_ndims[m].key = m; sorted_ndims[m].value = ndims[m]; } /* Increasingly sort */ ptiPairArraySort(sorted_ndims, nmodes); for(ptiIndex m=0; m<nmodes; ++m) { mode_order[m] = sorted_ndims[m].key; } /* Find the location of mode */ ptiIndex mode_loc = 0; for(ptiIndex m=0; m<nmodes; ++m) { if(mode_order[m] == mode) { mode_loc = m; } } /* Shift mode to moder_order[0] */ if(mode_loc != 0) { for(ptiIndex m=mode_loc; m>=1; --m) { mode_order[m] = mode_order[m-1]; } mode_order[0] = mode; } free(sorted_ndims); } /** * Determine the worst mode order. 
Sort order: [(ordered by decreasing dimension sizes)] * * @param[out] mode_order a pointer to the array to be filled, * @param[in] mode mode to do product * @param[in] ndims tensor dimension sizes * @param[in] nmodes tensor order * */ void ptiGetWorstModeOrder( ptiIndex * mode_order, ptiIndex const mode, ptiIndex const * ndims, ptiIndex const nmodes) { ptiKeyValuePair * sorted_ndims = (ptiKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims)); for(ptiIndex m=0; m<nmodes; ++m) { sorted_ndims[m].key = m; sorted_ndims[m].value = ndims[m]; } /* Increasingly sort */ ptiPairArraySort(sorted_ndims, nmodes); for(ptiIndex m=0; m<nmodes; ++m) { mode_order[m] = sorted_ndims[nmodes - 1 - m].key; } /* Find the location of mode */ ptiIndex mode_loc = 0; for(ptiIndex m=0; m<nmodes; ++m) { if(mode_order[m] == mode) { mode_loc = m; } } /* Shift mode to moder_order[0] */ if(mode_loc != nmodes - 1) { for(ptiIndex m=mode_loc; m<nmodes; ++m) { mode_order[m] = mode_order[m+1]; } mode_order[nmodes - 1] = mode; } free(sorted_ndims); } /** * Sort COO sparse tensor by Z-Morton order. (The same with "ptiPreprocessSparseTensor" function in "convert.c" without setting kschr.) * Kernels in Row-major order, blocks and elements are in Z-Morton order. * @param tsr a pointer to a sparse tensor * @return mode pointers */ int ptiSparseTensorMixedOrder( ptiSparseTensor *tsr, const ptiElementIndex sb_bits, const ptiElementIndex sk_bits, int const tk) { ptiNnzIndex nnz = tsr->nnz; int result; /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. 
better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */ ptiSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk); ptiNnzIndexVector kptr, knnzs; result = ptiNewNnzIndexVector(&kptr, 0, 0); pti_CheckError(result, "HiSpTns New", NULL); result = ptiSetKernelPointers(&kptr, &knnzs, tsr, sk_bits); pti_CheckError(result, "HiSpTns Preprocess", NULL); /* Sort blocks in each kernel in Morton-order */ ptiNnzIndex k_begin, k_end; /* Loop for all kernels, 0-kptr.len for OMP code */ for(ptiNnzIndex k=0; k<kptr.len - 1; ++k) { k_begin = kptr.data[k]; k_end = kptr.data[k+1]; // exclusive /* Sort blocks in each kernel in Morton-order */ ptiSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk); } return 0; } /** * Sort COO sparse tensor by plain blocked order for modes except mode-n. Blocks are in Row-major order. * @param tsr a pointer to a sparse tensor * @return mode pointers */ int ptiSparseTensorSortPartialIndex( ptiSparseTensor *tsr, ptiIndex const * mode_order, const ptiElementIndex sb_bits, int const tk) { ptiNnzIndex nnz = tsr->nnz; ptiIndex * ndims = tsr->ndims; ptiIndex const mode = mode_order[0]; int result; ptiSparseTensorSortIndexCustomOrder(tsr, mode_order, 1, tk); ptiNnzIndexVector ptir; result = ptiNewNnzIndexVector(&ptir, 0, 0); pti_CheckError(result, "HiSpTns New", NULL); ptiNnzIndex slice_nnz = 0; ptiIndex pre_idx = tsr->inds[mode].data[0]; result = ptiAppendNnzIndexVector(&ptir, 0); for (ptiNnzIndex z = 0; z < nnz; ++z ) { ++ slice_nnz; if (tsr->inds[mode].data[z] > pre_idx ) { result = ptiAppendNnzIndexVector(&ptir, slice_nnz-1); pre_idx = tsr->inds[mode].data[z]; } } result = ptiAppendNnzIndexVector(&ptir, nnz); ptiDumpNnzIndexVector(&ptir, stdout); ptiNnzIndex s_begin, s_end; // Loop for slices for(ptiNnzIndex s = 0; s < ndims[mode]; ++ s) { s_begin = ptir.data[s]; s_end = ptir.data[s+1]; // exclusive /* Sort blocks in each kernel in plain row-order */ ptiSparseTensorSortIndexRowBlock(tsr, 1, 
s_begin, s_end, sb_bits, tk); } return 0; } /** * Randomly shuffle all nonzeros. * * @param[in] tsr tensor to be shuffled * */ void ptiGetRandomShuffleElements(ptiSparseTensor *tsr) { ptiNnzIndex const nnz = tsr->nnz; for(ptiNnzIndex z=0; z<nnz; ++z) { srand(z+1); ptiValue rand_val = (ptiValue) rand() / (ptiValue) RAND_MAX; ptiNnzIndex new_loc = (ptiNnzIndex) ( rand_val * nnz ) % nnz; pti_SwapValues(tsr, z, new_loc); } } /** * Randomly shuffle all indices. * * @param[in] tsr tensor to be shuffled * @param[out] map_inds records the randomly generated mapping * */ void ptiGetRandomShuffledIndices(ptiSparseTensor *tsr, ptiIndex ** map_inds) { /* Get randomly renumbering indices */ for(ptiIndex m = 0; m < tsr->nmodes; ++m) { ptiIndex dim_len = tsr->ndims[m]; for(long int i = dim_len - 1; i > 0; --i) { srand(m+i+1+time(NULL)); ptiIndex new_loc = (ptiIndex) (rand() % (i+1)); /* Swap i <-> new_loc */ ptiIndex tmp = map_inds[m][i]; map_inds[m][i] = map_inds[m][new_loc]; map_inds[m][new_loc] = tmp; } } } /**************************** * Sorting functions ****************************/ /** * Reorder the elements in a COO sparse tensor lexicographically, sorting by Morton-order. * @param hitsr the sparse tensor to operate on */ void ptiSparseTensorSortIndexMorton( ptiSparseTensor *tsr, int force, ptiNnzIndex begin, ptiNnzIndex end, ptiElementIndex sb_bits, int tk) { size_t m; int needsort = 0; for(m = 0; m < tsr->nmodes; ++m) { if(tsr->sortorder[m] != m) { tsr->sortorder[m] = m; needsort = 1; } } if(needsort || force) { #pragma omp parallel num_threads(tk) { #pragma omp single nowait { /* TODO: add support for other order tensors */ switch(tsr->nmodes) { case 3: pti_QuickSortIndexMorton3D(tsr, begin, end, sb_bits); break; case 4: pti_QuickSortIndexMorton4D(tsr, begin, end, sb_bits); break; default: printf("No support for more than 4th-order tensors yet.\n"); } } } } } /** * Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order. 
* @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexExceptSingleModeRowBlock(
    ptiSparseTensor *tsr,
    int force,
    ptiNnzIndex begin,
    ptiNnzIndex end,
    ptiIndex * const mode_order,
    ptiElementIndex sk_bits,
    int tk)
{
    size_t m;
    int needsort = 0;

    /* Side effect: resets sortorder[] to the identity permutation while
     * deciding whether a sort is needed. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        /* One thread launches the recursive quicksort; recursion forks OpenMP tasks. */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                pti_QuickSortIndexExceptSingleModeRowBlock(tsr, begin, end, mode_order, sk_bits);
            }
        }
    }
}


/**
 * Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order.
 * @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexRowBlock(
    ptiSparseTensor *tsr,
    int force,
    ptiNnzIndex begin,
    ptiNnzIndex end,
    ptiElementIndex sk_bits,
    int tk)
{
    size_t m;
    int needsort = 0;

    /* Side effect: resets sortorder[] to the identity permutation. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                pti_QuickSortIndexRowBlock(tsr, begin, end, sk_bits);
            }
        }
    }
}


/**
 * Reorder the elements in a sparse tensor lexicographically, sorting only one mode.
 * @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexSingleMode(ptiSparseTensor *tsr, int force, ptiIndex mode, int tk)
{
    ptiIndex m;
    int needsort = 0;

    /* Side effect: resets sortorder[] to the identity permutation. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        /* Always sorts the full nonzero range [0, nnz). */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                pti_QuickSortIndexSingleMode(tsr, 0, tsr->nnz, mode);
            }
        }
    }
}


/**
 * Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
* @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexExceptSingleMode(ptiSparseTensor *tsr, int force, ptiIndex * mode_order, int tk)
{
    ptiIndex m;
    int needsort = 0;
    /* Scratch buffer for the insertion-sort path; currently passed through as
     * NULL (the buffered-move code in the insertion sort is commented out). */
    ptiIndex * eleinds_buf = NULL;

    /* Side effect: resets sortorder[] to the identity permutation while
     * deciding whether a sort is needed. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        /* One thread launches the recursive quicksort; recursion forks OpenMP tasks. */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                pti_QuickSortIndexExceptSingleMode(tsr, 0, tsr->nnz, mode_order, eleinds_buf);
            }
        }
    }
}


/**
 * Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
 * @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexExceptSingleModeMorton(ptiSparseTensor *tsr, int force, ptiIndex * mode_order, ptiElementIndex sb_bits, int tk)
{
    ptiIndex m;
    int needsort = 0;

    /* Side effect: resets sortorder[] to the identity permutation. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                switch(tsr->nmodes) {
                case 3:
                    /* 3rd-order tensor: Morton order over the two non-excluded modes. */
                    pti_QuickSortIndexMorton2D(tsr, 0, tsr->nnz, sb_bits, mode_order);
                    break;
                case 4:
                    /* NOTE: 4th-order path is not implemented -- falls through
                     * to break and leaves the tensor unsorted. */
                    // pti_QuickSortIndexMorton3D(tsr, 0, tsr->nnz, sb_bits, mode_order);
                    break;
                default:
                    printf("No support for more than 4th-order tensors yet.\n");
                }
            }
        }
    }
}


/**
 * Reorder the elements in a sparse tensor lexicographically in a customized order.
 * @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndexCustomOrder(ptiSparseTensor *tsr, ptiIndex const * mode_order, int force, int tk)
{
    ptiIndex nmodes = tsr->nmodes;
    ptiIndex m;
    ptiSparseTensor tsr_temp;   // Only copy pointers, not real data.
    /* Already sorted in the requested order: nothing to do. */
    if(!force && memcmp(tsr->sortorder, mode_order, nmodes * sizeof (ptiIndex)) == 0) {
        return;
    }

    /* Build a shallow view of tsr with modes permuted by mode_order, so that
     * the plain lexicographic sort below realizes the customized order.
     * Only ndims/inds are freshly allocated; values and sortorder are shared. */
    tsr_temp.nmodes = nmodes;
    tsr_temp.sortorder = tsr->sortorder;
    tsr_temp.ndims = malloc(nmodes * sizeof tsr_temp.ndims[0]);   /* NOTE(review): malloc unchecked */
    tsr_temp.nnz = tsr->nnz;
    tsr_temp.inds = malloc(nmodes * sizeof tsr_temp.inds[0]);
    tsr_temp.values = tsr->values;
    for(m = 0; m < nmodes; ++m) {
        tsr_temp.ndims[m] = tsr->ndims[mode_order[m]];
        tsr_temp.inds[m] = tsr->inds[mode_order[m]];
    }

    ptiSparseTensorSortIndex(&tsr_temp, 1, tk);

    /* Drop the shallow view; the nonzeros of tsr were reordered in place. */
    free(tsr_temp.inds);
    free(tsr_temp.ndims);

    /* Record the order that tsr is now sorted in. */
    for(m = 0; m < nmodes; ++m) {
        tsr->sortorder[m] = mode_order[m];
    }
}


/**
 * Reorder the elements in a sparse tensor lexicographically
 * @param tsr the sparse tensor to operate on
 */
void ptiSparseTensorSortIndex(ptiSparseTensor *tsr, int force, int tk)
{
    ptiIndex m;
    int needsort = 0;

    /* Side effect: resets sortorder[] to the identity permutation while
     * deciding whether a sort is needed. */
    for(m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            needsort = 1;
        }
    }

    if(needsort || force) {
        /* One thread launches the recursive quicksort; recursion forks OpenMP tasks. */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                pti_QuickSortIndex(tsr, 0, tsr->nnz);
            }
        }
    }
}


/****************************
 * Comparison functions
 ****************************/

/**
 * compare two indices from two identical or distinct sparse tensors lexicographically
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
int pti_SparseTensorCompareIndices(ptiSparseTensor * const tsr1, ptiNnzIndex loc1, ptiSparseTensor * const tsr2, ptiNnzIndex loc2)
{
    ptiIndex i;
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Compare mode by mode; the first differing index decides. */
    for(i = 0; i < tsr1->nmodes; ++i) {
        ptiIndex eleind1 = tsr1->inds[i].data[loc1];
        ptiIndex eleind2 = tsr2->inds[i].data[loc2];
        if(eleind1 < eleind2) {
            return -1;
        } else if(eleind1 > eleind2) {
            return 1;
        }
    }
    return 0;
}


/**
 * compare two indices from two
identical or distinct sparse tensors lexicographically in all modes except mode * @param tsr1 the first sparse tensor * @param loc1 the order of the element in the first sparse tensor whose index is to be compared * @param tsr2 the second sparse tensor * @param loc2 the order of the element in the second sparse tensor whose index is to be compared * @param mode the mode to be excluded in comparison * @return -1 for less, 0 for equal, 1 for greater */ int pti_SparseTensorCompareIndicesExceptSingleMode(ptiSparseTensor * const tsr1, ptiNnzIndex loc1, ptiSparseTensor * const tsr2, ptiNnzIndex loc2, ptiIndex * const mode_order) { ptiIndex i, m; ptiIndex eleind1, eleind2; assert(tsr1->nmodes == tsr2->nmodes); for(i = 0; i < tsr1->nmodes - 1; ++ i) { m = mode_order[i]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } } #if 0 switch(tsr1->nmodes) { case 3: m = mode_order[0]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } m = mode_order[1]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } break; case 4: m = mode_order[0]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } m = mode_order[1]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } m = mode_order[2]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } break; default: for(i = 0; i < tsr1->nmodes - 1; ++ i) { m = mode_order[i]; eleind1 = tsr1->inds[m].data[loc1]; eleind2 = tsr2->inds[m].data[loc2]; if(eleind1 < eleind2) { 
return -1; } else if(eleind1 > eleind2) { return 1; } } } #endif return 0; } /** * compare two indices from two identical or distinct sparse tensors lexicographically in the num_ncmodes modes under the specified mode_order. * @param tsr1 the first sparse tensor * @param loc1 the order of the element in the first sparse tensor whose index is to be compared * @param tsr2 the second sparse tensor * @param loc2 the order of the element in the second sparse tensor whose index is to be compared * @param mode the mode to be excluded in comparison * @return -1 for less, 0 for equal, 1 for greater */ int pti_SparseTensorCompareIndicesCustomize(ptiSparseTensor * const tsr1, ptiNnzIndex loc1, ptiIndex * const mode_order_1, ptiSparseTensor * const tsr2, ptiNnzIndex loc2, ptiIndex * const mode_order_2, ptiIndex num_ncmodes) { ptiIndex i, m1, m2; ptiIndex eleind1, eleind2; for(i = 0; i < num_ncmodes; ++ i) { m1 = mode_order_1[i]; m2 = mode_order_2[i]; eleind1 = tsr1->inds[m1].data[loc1]; eleind2 = tsr2->inds[m2].data[loc2]; if(eleind1 < eleind2) { return -1; } else if(eleind1 > eleind2) { return 1; } } return 0; } int pti_SparseTensorCompareIndicesExceptSingleModeCantor(ptiSparseTensor * const tsr1, ptiNnzIndex loc1, ptiSparseTensor * const tsr2, ptiNnzIndex loc2, ptiIndex * const mode_order) { ptiIndex i, m; ptiIndex eleind1, eleind2; double val1, val2; double prods, presum; double invfactorials[7] = {0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0, 1.0/720.0}; /*we memorize factorials*/ assert(tsr1->nmodes == tsr2->nmodes); assert(tsr1->nmodes <= 6); /*just so that we memorize only 6 factorials*/ // printf("loc1: %lu, loc2: %lu\n", loc1, loc2); val1 = presum = 0.0; for(i = 0; i < tsr1->nmodes - 1; ++ i) { m = mode_order[i]; eleind1 = tsr1->inds[m].data[loc1]; // printf("mode %u: eleind1: %u\n", m, eleind1); presum = presum + eleind1; prods = presum; for (ptiIndex jj = 1; jj < i+1; jj ++) prods = prods * (presum + jj); // printf("val1: presum: %lf, prods: %lf \n", presum, prods); 
val1 += invfactorials[i+1] * prods; } // printf("val1: %lf \n", val1); val2 = presum = 0.0; for(i = 0; i < tsr2->nmodes - 1; ++ i) { m = mode_order[i]; eleind2 = tsr2->inds[m].data[loc2]; // printf("mode %u: eleind2: %u\n", m, eleind2); presum = presum + eleind2; prods = presum; for (ptiIndex jj=1; jj < i+1; jj ++) prods = prods * (presum+jj); // printf("val2: presum: %lf, prods: %lf \n", presum, prods); val2 += invfactorials[i+1] * prods; } // printf("val2: %lf \n\n", val2); if(val1 < val2) return -1; else if (val1 > val2) return 1; else return 0; } /** * Comapre two index arrays lexicographically * @param inds1 the first indices to be compared * @param inds2 the second indices to be compared * @param len the length of both inds1 and inds2 * @return 1 for in the range; otherwise return -1. */ int pti_SparseTensorCompareIndicesRange(ptiSparseTensor * const tsr, ptiNnzIndex loc, ptiIndex * const inds1, ptiIndex * const inds2) { ptiIndex i; for(i = 0; i < tsr->nmodes; ++i) { ptiIndex eleind = tsr->inds[i].data[loc]; if(eleind < inds1[i] || eleind >= inds2[i]) { return -1; } } return 1; } /** * compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords. Compare all modes except one. Also inter- and intra- blocks are both sorted. 
* @param tsr1 the first sparse tensor * @param loc1 the order of the element in the first sparse tensor whose index is to be compared * @param tsr2 the second sparse tensor * @param loc2 the order of the element in the second sparse tensor whose index is to be compared * @return -1 for less, 0 for equal, 1 for greater */ int pti_SparseTensorCompareIndicesExceptSingleModeRowBlock( ptiSparseTensor * const tsr1, ptiNnzIndex loc1, ptiSparseTensor * const tsr2, ptiNnzIndex loc2, ptiIndex * const mode_order, ptiElementIndex sk_bits) { ptiIndex i, m; assert(tsr1->nmodes == tsr2->nmodes); for(i = 0; i < tsr1->nmodes - 1; ++i) { m = mode_order[i]; ptiIndex eleind1 = tsr1->inds[m].data[loc1]; ptiIndex eleind2 = tsr2->inds[m].data[loc2]; ptiIndex blkind1 = eleind1 >> sk_bits; ptiIndex blkind2 = eleind2 >> sk_bits; if(blkind1 < blkind2) { return -1; } else if(blkind1 > blkind2) { return 1; } else if(eleind1 < eleind2) { // if blkind1 == blkind2 return -1; } else if(eleind1 > eleind2) { // if blkind1 == blkind2 return 1; } } return 0; } /** * compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords. 
* @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
int pti_SparseTensorCompareIndicesRowBlock(
    ptiSparseTensor * const tsr1,
    ptiNnzIndex loc1,
    ptiSparseTensor * const tsr2,
    ptiNnzIndex loc2,
    ptiElementIndex sk_bits)
{
    ptiIndex i;
    assert(tsr1->nmodes == tsr2->nmodes);

    /* Lexicographic compare on block ids only (index >> sk_bits); elements
     * within the same block compare equal here. */
    for(i = 0; i < tsr1->nmodes; ++i) {
        ptiIndex eleind1 = tsr1->inds[i].data[loc1];
        ptiIndex eleind2 = tsr2->inds[i].data[loc2];
        ptiIndex blkind1 = eleind1 >> sk_bits;
        ptiIndex blkind2 = eleind2 >> sk_bits;
        // printf("blkind1: %lu, blkind2: %lu\n", blkind1, blkind2);

        if(blkind1 < blkind2) {
            return -1;
        } else if(blkind1 > blkind2) {
            return 1;
        }
    }

    return 0;
}


/**
 * compare two indices from two identical or distinct sparse tensors lexicographically, using 2D Z-Morton ordering recursively.
 * @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
int pti_SparseTensorCompareIndicesMorton2D(
    ptiSparseTensor * const tsr1,
    uint64_t loc1,
    ptiSparseTensor * const tsr2,
    uint64_t loc2,
    ptiIndex * mode_order,
    ptiElementIndex sb_bits)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    uint64_t mkey1 = 0, mkey2 = 0;

    /* Only support 3-D tensors, with 32-bit indices.
     * (Only the two modes named by mode_order[0..1] take part.) */
    uint32_t x1 = tsr1->inds[mode_order[0]].data[loc1];
    uint32_t y1 = tsr1->inds[mode_order[1]].data[loc1];
    uint32_t x2 = tsr2->inds[mode_order[0]].data[loc2];
    uint32_t y2 = tsr2->inds[mode_order[1]].data[loc2];

    /* Compare block indices (primary key: row-major over blocks). */
    ptiIndex blk_x1 = x1 >> sb_bits;
    ptiIndex blk_y1 = y1 >> sb_bits;
    ptiIndex blk_x2 = x2 >> sb_bits;
    ptiIndex blk_y2 = y2 >> sb_bits;
    if(blk_x1 < blk_x2) {
        return -1;
    } else if(blk_x1 > blk_x2) {
        return 1;
    } else if(blk_y1 < blk_y2) {    // if blk_x1 == blk_x2
        return -1;
    } else if(blk_y1 > blk_y2) {    // if blk_x1 == blk_x2
        return 1;
    }

    /* blk_x1 == blk_x2, blk_y1 == blk_y2, sort inside a block in Z-Morton order.
     * The shift/mask cascades below dilate the in-block offsets (one zero bit
     * between payload bits) so the two coordinates can be interleaved.
     * SHIFTS/MASKS are file-scope tables -- presumably the standard 2-way
     * bit-dilation constants; confirm against their definition. */
    uint64_t x = x1 - (blk_x1 << sb_bits);
    uint64_t y = y1 - (blk_y1 << sb_bits);
    x = (x | (x << SHIFTS[3])) & MASKS[3];
    x = (x | (x << SHIFTS[2])) & MASKS[2];
    x = (x | (x << SHIFTS[1])) & MASKS[1];
    x = (x | (x << SHIFTS[0])) & MASKS[0];
    y = (y | (y << SHIFTS[3])) & MASKS[3];
    y = (y | (y << SHIFTS[2])) & MASKS[2];
    y = (y | (y << SHIFTS[1])) & MASKS[1];
    y = (y | (y << SHIFTS[0])) & MASKS[0];
    mkey1 = y | (x << 1);

    x = x2 - (blk_x2 << sb_bits);
    y = y2 - (blk_y2 << sb_bits);
    x = (x | (x << SHIFTS[3])) & MASKS[3];
    x = (x | (x << SHIFTS[2])) & MASKS[2];
    x = (x | (x << SHIFTS[1])) & MASKS[1];
    x = (x | (x << SHIFTS[0])) & MASKS[0];
    y = (y | (y << SHIFTS[3])) & MASKS[3];
    y = (y | (y << SHIFTS[2])) & MASKS[2];
    y = (y | (y << SHIFTS[1])) & MASKS[1];
    y = (y | (y << SHIFTS[0])) & MASKS[0];
    mkey2 = y | (x << 1);

    if(mkey1 < mkey2) {
        return -1;
    } else if(mkey1 > mkey2) {
        return 1;
    } else {
        return 0;
    }
}


/**
 * compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support 3-D, 4-D for uint32_t indices.
 * When tensor order is larger than 5, index ranges are limited.
* @param tsr1 the first sparse tensor
 * @param loc1 the order of the element in the first sparse tensor whose index is to be compared
 * @param tsr2 the second sparse tensor
 * @param loc2 the order of the element in the second sparse tensor whose index is to be compared
 * @return -1 for less, 0 for equal, 1 for greater
 */
static int pti_SparseTensorCompareIndicesMorton3D(
    ptiSparseTensor * const tsr1,
    uint64_t loc1,
    ptiSparseTensor * const tsr2,
    uint64_t loc2)
{
    ptiMortonIndex mkey1 = 0, mkey2 = 0;
    assert(tsr1->nmodes == tsr2->nmodes);

    /* Only support 3-D tensors, with 32-bit indices. */
    uint32_t x1 = tsr1->inds[0].data[loc1];
    uint32_t y1 = tsr1->inds[1].data[loc1];
    uint32_t z1 = tsr1->inds[2].data[loc1];
    uint32_t x2 = tsr2->inds[0].data[loc2];
    uint32_t y2 = tsr2->inds[1].data[loc2];
    uint32_t z2 = tsr2->inds[2].data[loc2];

    /* Build each 3-way Morton key byte by byte, most significant byte first,
     * using the precomputed 256-entry dilation tables morton256_{x,y,z}
     * (defined elsewhere). Each byte of each coordinate expands to 24 key
     * bits, hence the 72/48/24-bit shifts between byte groups. */
    mkey1 = morton256_z[(z1 >> 24) & 0xFF ] |
        morton256_y[(y1 >> 24) & 0xFF ] |
        morton256_x[(x1 >> 24) & 0xFF ];
    mkey1 = mkey1 << 72 |
        morton256_z[(z1 >> 16) & 0xFF ] |
        morton256_y[(y1 >> 16) & 0xFF ] |
        morton256_x[(x1 >> 16) & 0xFF ];
    mkey1 = mkey1 << 48 |
        morton256_z[(z1 >> 8) & 0xFF ] |
        morton256_y[(y1 >> 8) & 0xFF ] |
        morton256_x[(x1 >> 8) & 0xFF ];
    mkey1 = mkey1 << 24 |
        morton256_z[(z1) & 0xFF ] |
        morton256_y[(y1) & 0xFF ] |
        morton256_x[(x1) & 0xFF ];

    mkey2 = morton256_z[(z2 >> 24) & 0xFF ] |
        morton256_y[(y2 >> 24) & 0xFF ] |
        morton256_x[(x2 >> 24) & 0xFF ];
    mkey2 = mkey2 << 72 |
        morton256_z[(z2 >> 16) & 0xFF ] |
        morton256_y[(y2 >> 16) & 0xFF ] |
        morton256_x[(x2 >> 16) & 0xFF ];
    mkey2 = mkey2 << 48 |
        morton256_z[(z2 >> 8) & 0xFF ] |
        morton256_y[(y2 >> 8) & 0xFF ] |
        morton256_x[(x2 >> 8) & 0xFF ];
    mkey2 = mkey2 << 24 |
        morton256_z[(z2) & 0xFF ] |
        morton256_y[(y2) & 0xFF ] |
        morton256_x[(x2) & 0xFF ];

    if(mkey1 < mkey2) {
        return -1;
    } else if(mkey1 > mkey2) {
        return 1;
    } else {
        return 0;
    }
}


/**
 * compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support arbitrary tensor
orders. * @param tsr1 the first sparse tensor * @param loc1 the order of the element in the first sparse tensor whose index is to be compared * @param tsr2 the second sparse tensor * @param loc2 the order of the element in the second sparse tensor whose index is to be compared * @return -1 for less, 0 for equal, 1 for greater */ static int pti_SparseTensorCompareIndicesMorton4D( ptiSparseTensor * const tsr1, uint64_t loc1, ptiSparseTensor * const tsr2, uint64_t loc2) { ptiMortonIndex mkey1, mkey2; assert(tsr1->nmodes == tsr2->nmodes); /* Only support 3-D tensors, with 32-bit indices. */ uint32_t x1 = tsr1->inds[0].data[loc1]; uint32_t y1 = tsr1->inds[1].data[loc1]; uint32_t z1 = tsr1->inds[2].data[loc1]; uint32_t w1 = tsr1->inds[3].data[loc1]; uint32_t x2 = tsr2->inds[0].data[loc2]; uint32_t y2 = tsr2->inds[1].data[loc2]; uint32_t z2 = tsr2->inds[2].data[loc2]; uint32_t w2 = tsr2->inds[3].data[loc2]; static const uint64_t MASKS_64[]={0x5555555555555555, 0x3333333333333333, 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, 0x0000FFFF0000FFFF}; static const uint64_t SHIFTS_64[]= {1, 2, 4, 8, 16}; static ptiMortonIndex MASKS_128[] = { (ptiMortonIndex)0x5555555555555555 << 64 | 0x5555555555555555, (ptiMortonIndex)0x3333333333333333 << 64 | 0x3333333333333333, (ptiMortonIndex)0x0F0F0F0F0F0F0F0F << 64 | 0x0F0F0F0F0F0F0F0F, (ptiMortonIndex)0x00FF00FF00FF00FF << 64 | 0x00FF00FF00FF00FF, (ptiMortonIndex)0x0000FFFF0000FFFF << 64 | 0x0000FFFF0000FFFF, (ptiMortonIndex)0x00000000FFFFFFFF << 64 | 0x00000000FFFFFFFF}; static const uint64_t SHIFTS_128[]= {1, 2, 4, 8, 16, 32}; // ptiMortonIndex tmp_mask = MASKS_128[2]; // printf("tmp_mask: high: %"PRIX64 " ; low: %"PRIX64 " .\n", (uint64_t)(tmp_mask >> 64), (uint64_t)tmp_mask); uint64_t tmp_64; ptiMortonIndex x, y, z, w; /**** compute mkey1 ****/ /* compute correct x, 32bit -> 64bit first */ tmp_64 = x1; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | 
(tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct x, 64bit -> 128bit */ x = tmp_64; x = (x | (x << SHIFTS_128[5])) & MASKS_128[5]; x = (x | (x << SHIFTS_128[4])) & MASKS_128[4]; x = (x | (x << SHIFTS_128[3])) & MASKS_128[3]; x = (x | (x << SHIFTS_128[2])) & MASKS_128[2]; x = (x | (x << SHIFTS_128[1])) & MASKS_128[1]; x = (x | (x << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct y, 32bit -> 64bit first */ tmp_64 = y1; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct y, 64bit -> 128bit */ y = tmp_64; y = (y | (y << SHIFTS_128[5])) & MASKS_128[5]; y = (y | (y << SHIFTS_128[4])) & MASKS_128[4]; y = (y | (y << SHIFTS_128[3])) & MASKS_128[3]; y = (y | (y << SHIFTS_128[2])) & MASKS_128[2]; y = (y | (y << SHIFTS_128[1])) & MASKS_128[1]; y = (y | (y << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct z, 32bit -> 64bit first */ tmp_64 = z1; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct z, 64bit -> 128bit */ z = tmp_64; z = (z | (z << SHIFTS_128[5])) & MASKS_128[5]; z = (z | (z << SHIFTS_128[4])) & MASKS_128[4]; z = (z | (z << SHIFTS_128[3])) & MASKS_128[3]; z = (z | (z << SHIFTS_128[2])) & MASKS_128[2]; z = (z | (z << SHIFTS_128[1])) & MASKS_128[1]; z = (z | (z << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct w, 32bit -> 64bit first */ tmp_64 = w1; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & 
MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct w, 64bit -> 128bit */ w = tmp_64; w = (w | (w << SHIFTS_128[5])) & MASKS_128[5]; w = (w | (w << SHIFTS_128[4])) & MASKS_128[4]; w = (w | (w << SHIFTS_128[3])) & MASKS_128[3]; w = (w | (w << SHIFTS_128[2])) & MASKS_128[2]; w = (w | (w << SHIFTS_128[1])) & MASKS_128[1]; w = (w | (w << SHIFTS_128[0])) & MASKS_128[0]; // mkey1 = x | (y << 1) | (z << 2) | (w << 3); mkey1 = w | (z << 1) | (y << 2) | (x << 3); /**** compute mkey2 ****/ /* compute correct x, 32bit -> 64bit first */ tmp_64 = x2; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct x, 64bit -> 128bit */ x = tmp_64; x = (x | (x << SHIFTS_128[5])) & MASKS_128[5]; x = (x | (x << SHIFTS_128[4])) & MASKS_128[4]; x = (x | (x << SHIFTS_128[3])) & MASKS_128[3]; x = (x | (x << SHIFTS_128[2])) & MASKS_128[2]; x = (x | (x << SHIFTS_128[1])) & MASKS_128[1]; x = (x | (x << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct y, 32bit -> 64bit first */ tmp_64 = y2; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct y, 64bit -> 128bit */ y = tmp_64; y = (y | (y << SHIFTS_128[5])) & MASKS_128[5]; y = (y | (y << SHIFTS_128[4])) & MASKS_128[4]; y = (y | (y << SHIFTS_128[3])) & MASKS_128[3]; y = (y | (y << SHIFTS_128[2])) & MASKS_128[2]; 
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1]; y = (y | (y << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct z, 32bit -> 64bit first */ tmp_64 = z2; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct z, 64bit -> 128bit */ z = tmp_64; z = (z | (z << SHIFTS_128[5])) & MASKS_128[5]; z = (z | (z << SHIFTS_128[4])) & MASKS_128[4]; z = (z | (z << SHIFTS_128[3])) & MASKS_128[3]; z = (z | (z << SHIFTS_128[2])) & MASKS_128[2]; z = (z | (z << SHIFTS_128[1])) & MASKS_128[1]; z = (z | (z << SHIFTS_128[0])) & MASKS_128[0]; /* compute correct w, 32bit -> 64bit first */ tmp_64 = w2; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1]; tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0]; /* compute correct w, 64bit -> 128bit */ w = tmp_64; w = (w | (w << SHIFTS_128[5])) & MASKS_128[5]; w = (w | (w << SHIFTS_128[4])) & MASKS_128[4]; w = (w | (w << SHIFTS_128[3])) & MASKS_128[3]; w = (w | (w << SHIFTS_128[2])) & MASKS_128[2]; w = (w | (w << SHIFTS_128[1])) & MASKS_128[1]; w = (w | (w << SHIFTS_128[0])) & MASKS_128[0]; mkey2 = w | (z << 1) | (y << 2) | (x << 3); if(mkey1 < mkey2) { return -1; } else if(mkey1 > mkey2) { return 1; } else { return 0; } } /**************************** * Quicksort functions ****************************/ static void pti_QuickSortIndexMorton2D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits, ptiIndex * mode_order) { uint64_t i, j, p; if(r-l < 2) { return; } p = (l+r) / 2; for(i = l, j = r-1; ; ++i, --j) { while(pti_SparseTensorCompareIndicesMorton2D(tsr, i, tsr, p, 
            mode_order, sb_bits) < 0) {
            // printf("(%lu, %lu) result: %d\n", i, p, pti_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p));
            ++i;
        }
        while(pti_SparseTensorCompareIndicesMorton2D(tsr, p, tsr, j, mode_order, sb_bits) < 0) {
            // printf("(%lu, %lu) result: %d\n", p, j,pti_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j));
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValues(tsr, i, j);
        /* Keep tracking the pivot if the swap moved it. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
    /* Left half runs as an OpenMP task; right half recurses in this thread. */
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
    {
        pti_QuickSortIndexMorton2D(tsr, l, i, sb_bits, mode_order);
    }
    pti_QuickSortIndexMorton2D(tsr, i, r, sb_bits, mode_order);
#pragma omp taskwait
}


/* Task-parallel quicksort of nonzeros [l, r) by the 3-D Morton comparator. */
static void pti_QuickSortIndexMorton3D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits)
{
    uint64_t i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(pti_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p) < 0) {
            // printf("(%lu, %lu) result: %d\n", i, p, pti_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p));
            ++i;
        }
        while(pti_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j) < 0) {
            // printf("(%lu, %lu) result: %d\n", p, j,pti_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j));
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValues(tsr, i, j);
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
    {
        pti_QuickSortIndexMorton3D(tsr, l, i, sb_bits);
    }
    pti_QuickSortIndexMorton3D(tsr, i, r, sb_bits);
#pragma omp taskwait
}


/* Task-parallel quicksort of nonzeros [l, r) by the 4-D Morton comparator. */
static void pti_QuickSortIndexMorton4D(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits)
{
    uint64_t i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(pti_SparseTensorCompareIndicesMorton4D(tsr, i, tsr, p) < 0) {
            // printf("(%lu, %lu) result: %d\n", i, p, pti_SparseTensorCompareIndicesMorton(tsr, i, tsr, p));
            ++i;
        }
        while(pti_SparseTensorCompareIndicesMorton4D(tsr, p, tsr, j) < 0) {
            // printf("(%lu, %lu) result: %d\n", p, j,pti_SparseTensorCompareIndicesMorton(tsr, p, tsr, j));
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValues(tsr, i, j);
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
    {
        pti_QuickSortIndexMorton4D(tsr, l, i, sb_bits);
    }
    pti_QuickSortIndexMorton4D(tsr, i, r, sb_bits);
#pragma omp taskwait
}


/* Task-parallel quicksort of [l, r), blocked row-major order over all modes
 * except the one excluded by mode_order. */
static void pti_QuickSortIndexExceptSingleModeRowBlock(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex * const mode_order, ptiElementIndex sk_bits)
{
    ptiNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(pti_SparseTensorCompareIndicesExceptSingleModeRowBlock(tsr, i, tsr, p, mode_order, sk_bits) < 0) {
            ++i;
        }
        while(pti_SparseTensorCompareIndicesExceptSingleModeRowBlock(tsr, p, tsr, j, mode_order, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValues(tsr, i, j);
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sk_bits)
    {
        pti_QuickSortIndexExceptSingleModeRowBlock(tsr, l, i, mode_order, sk_bits);
    }
    pti_QuickSortIndexExceptSingleModeRowBlock(tsr, i, r, mode_order, sk_bits);
#pragma omp taskwait
}


/* Task-parallel quicksort of [l, r) by block ids (row-major block order). */
static void pti_QuickSortIndexRowBlock(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sk_bits)
{
    ptiNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(pti_SparseTensorCompareIndicesRowBlock(tsr, i, tsr, p, sk_bits) < 0) {
            ++i;
        }
        while(pti_SparseTensorCompareIndicesRowBlock(tsr, p, tsr, j, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        pti_SwapValues(tsr, i, j);
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sk_bits)
    {
        pti_QuickSortIndexRowBlock(tsr, l, i, sk_bits);
    }
    pti_QuickSortIndexRowBlock(tsr, i, r, sk_bits);
#pragma omp taskwait
}


/* Task-parallel quicksort of [l, r) keyed on a single mode's indices only. */
static void pti_QuickSortIndexSingleMode(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex mode)
{
    ptiNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l,
j = r-1; ; ++i, --j) { while(tsr->inds[mode].data[i] < tsr->inds[mode].data[p]) { ++i; } while(tsr->inds[mode].data[p] < tsr->inds[mode].data[j]) { --j; } if(i >= j) { break; } pti_SwapValues(tsr, i, j); if(i == p) { p = j; } else if(j == p) { p = i; } } #pragma omp task firstprivate(l,i) shared(tsr, mode) { pti_QuickSortIndexSingleMode(tsr, l, i, mode); } pti_QuickSortIndexSingleMode(tsr, i, r, mode); #pragma omp taskwait } static void pti_InsertSortIndexExceptSingleMode(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex * mode_order, ptiIndex * eleinds_buf) { long int j; for(ptiNnzIndex i = l; i < r; ++i) { j = i - 1; // for(ptiIndex m = 0; m < tsr->nmodes; ++m) { // eleinds_buf[m] = tsr->inds[m].data[i]; // } // ptiValue val = tsr->values.data[i]; while (j >= 0 && pti_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, j, mode_order) < 0) { // for(ptiIndex m = 0; m < tsr->nmodes; ++m) { // tsr->inds[m].data[j+1] = tsr->inds[m].data[j]; // } // tsr->values.data[j+1] = tsr->values.data[j]; /* Since j and j+1 are adjacent, so the extra overhead of assigning indices and value to j is trivial. 
*/ pti_SwapValues(tsr, j+1, j); -- j; } // for(ptiIndex m = 0; m < tsr->nmodes; ++m) { // tsr->inds[m].data[j+1] = eleinds_buf[m]; // } // tsr->values.data[j+1] = val; } return; } static void pti_QuickSortIndexExceptSingleMode(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r, ptiIndex * mode_order, ptiIndex * eleinds_buf) { ptiNnzIndex i, j, p; if(r-l < INSERTION_SORT_LENGTH) { // eleinds_buf = (ptiIndex *)malloc(tsr->nmodes * sizeof(*eleinds_buf)); pti_InsertSortIndexExceptSingleMode(tsr, l, r, mode_order, eleinds_buf); // free(eleinds_buf); return; } p = (l+r) / 2; for(i = l, j = r-1; ; ++i, --j) { while(pti_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, p, mode_order) < 0) { // while(pti_SparseTensorCompareIndicesExceptSingleModeCantor(tsr, i, tsr, p, mode_order) < 0) { ++i; } while(pti_SparseTensorCompareIndicesExceptSingleMode(tsr, p, tsr, j, mode_order) < 0) { // while(pti_SparseTensorCompareIndicesExceptSingleModeCantor(tsr, p, tsr, j, mode_order) < 0) { --j; } if(i >= j) { break; } pti_SwapValues(tsr, i, j); if(i == p) { p = j; } else if(j == p) { p = i; } } #pragma omp task firstprivate(l,i) shared(tsr, mode_order) { // int tid_tmp = omp_get_thread_num(); // printf("[%lu, %lu] tid_tmp: %d\n", l, i, tid_tmp); pti_QuickSortIndexExceptSingleMode(tsr, l, i, mode_order, eleinds_buf); } // int tid_tmp = omp_get_thread_num(); // printf("[%lu, %lu] tid_tmp: %d\n", i, r, tid_tmp); pti_QuickSortIndexExceptSingleMode(tsr, i, r, mode_order, eleinds_buf); #pragma omp taskwait } static void pti_QuickSortIndex(ptiSparseTensor *tsr, ptiNnzIndex l, ptiNnzIndex r) { ptiNnzIndex i, j, p; if(r-l < 2) { return; } p = (l+r) / 2; for(i = l, j = r-1; ; ++i, --j) { while(pti_SparseTensorCompareIndices(tsr, i, tsr, p) < 0) { ++i; } while(pti_SparseTensorCompareIndices(tsr, p, tsr, j) < 0) { --j; } if(i >= j) { break; } pti_SwapValues(tsr, i, j); if(i == p) { p = j; } else if(j == p) { p = i; } } #pragma omp task firstprivate(l,i) shared(tsr) { pti_QuickSortIndex(tsr, l, 
i); } pti_QuickSortIndex(tsr, i, r); #pragma omp taskwait }
smg2_setup_rap.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.10 $
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "smg.h"

/*--------------------------------------------------------------------------
 * Sets up new coarse grid operator structure (2D SMG).
 *
 * Allocates and returns an empty 9-point (or, with symmetric storage,
 * 5-point lower-triangular) StructMatrix on coarse_grid to later hold RAP.
 * R and PT are accepted for interface uniformity; only A's symmetry flag is
 * consulted here.
 *--------------------------------------------------------------------------*/

hypre_StructMatrix *
hypre_SMG2CreateRAPOp( hypre_StructMatrix *R,
                       hypre_StructMatrix *A,
                       hypre_StructMatrix *PT,
                       hypre_StructGrid   *coarse_grid )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   /* ghost layout: {x-lo, x-hi, y-lo, y-hi, z-lo, z-hi} */
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 0, 0};
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 9;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 9 elements (c,w,e,n,s,sw,se,nw,ne)
             *--------------------------------------------------------------*/
            hypre_SetIndex(RAP_stencil_shape[stencil_rank],i,j,0);
            stencil_rank++;
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/

   else
   {
      /*--------------------------------------------------------------------
       * 5 or 9 point fine grid stencil produces 9 point RAP
       * Only store the lower triangular part + diagonal = 5 entries,
       * lower triangular means the lower triangular part on the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Store 5 elements in (c,w,s,sw,se)
             *--------------------------------------------------------------*/
            if( i+j <=0 )
            {
               hypre_SetIndex(RAP_stencil_shape[stencil_rank],i,j,0);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
                                           RAP_stencil_shape);

   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);

   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points
    * (symmetric storage needs no ghosts on the x-hi / y-hi sides)
    *-----------------------------------------------------------------------*/
   if (hypre_StructMatrixSymmetric(A))
   {
      RAP_num_ghost[1] = 0;
      RAP_num_ghost[3] = 0;
   }
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}

/*--------------------------------------------------------------------------
 * Routines to build RAP. These routines are fairly general
 *  1) No assumptions about symmetry of A
 *  2) No assumption that R = transpose(P)
 *  3) 5 or 9-point fine grid A
 *
 * I am, however, assuming that the c-to-c interpolation is the identity.
 *
 * I've written two routines - hypre_SMG2BuildRAPSym to build the
 * lower triangular part of RAP (including the diagonal) and
 * hypre_SMG2BuildRAPNoSym to build the upper triangular part of RAP
 * (excluding the diagonal). So using symmetric storage, only the
 * first routine would be called. With full storage both would need to
 * be called.
 *
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SMG2BuildRAPSym( hypre_StructMatrix *A,
                       hypre_StructMatrix *PT,
                       hypre_StructMatrix *R,
                       hypre_StructMatrix *RAP,
                       hypre_Index         cindex,
                       hypre_Index         cstride )

{

   hypre_Index           index;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *PT_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   double               *pa, *pb;
   double               *ra, *rb;

   double               *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   double               *a_csw, *a_cse, *a_cnw;

   double               *rap_cc, *rap_cw, *rap_cs;
   double               *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
      {
         /* advance fi until the fine box matching this coarse box is found */
         while (fgrid_ids[fi] != cgrid_ids[ci])
         {
            fi++;
         }

         cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

         cstart = hypre_BoxIMin(cgrid_box);
         hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

         A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
         PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
         R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
         RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

         /*-----------------------------------------------------------------
          * Extract pointers for interpolation operator:
          * pa is pointer for weight for f-point above c-point
          * pb is pointer for weight for f-point below c-point
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

         hypre_SetIndex(index,0,-1,0);
         pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

         /*-----------------------------------------------------------------
          * Extract pointers for restriction operator:
          * ra is pointer for weight for f-point above c-point
          * rb is pointer for weight for f-point below c-point
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

         hypre_SetIndex(index,0,-1,0);
         rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

         /*-----------------------------------------------------------------
          * Extract pointers for 5-point fine grid operator:
          *
          * a_cc is pointer for center coefficient
          * a_cw is pointer for west coefficient
          * a_ce is pointer for east coefficient
          * a_cs is pointer for south coefficient
          * a_cn is pointer for north coefficient
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,0,0);
         a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,-1,0,0);
         a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,1,0,0);
         a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,0,-1,0);
         a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,0,1,0);
         a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         /*-----------------------------------------------------------------
          * Extract additional pointers for 9-point fine grid operator:
          *
          * a_csw is pointer for southwest coefficient
          * a_cse is pointer for southeast coefficient
          * a_cnw is pointer for northwest coefficient
          * (a_cne is not needed for the lower triangular part)
          *-----------------------------------------------------------------*/

         if(fine_stencil_size > 5)
         {
            hypre_SetIndex(index,-1,-1,0);
            a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

            hypre_SetIndex(index,1,-1,0);
            a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

            hypre_SetIndex(index,-1,1,0);
            a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
         }

         /*-----------------------------------------------------------------
          * Extract pointers for coarse grid operator - always 9-point:
          *
          * We build only the lower triangular part (plus diagonal).
          *
          * rap_cc is pointer for center coefficient (etc.)
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,0,0);
         rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,-1,0,0);
         rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,0,-1,0);
         rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,-1,-1,0);
         rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,1,-1,0);
         rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         /*-----------------------------------------------------------------
          * Define offsets for fine grid stencil and interpolation
          *
          * In the BoxLoop below I assume iA and iP refer to data associated
          * with the point which we are building the stencil for. The below
          * Offsets are used in referring to data associated with other points.
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
         yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
         hypre_SetIndex(index,1,0,0);
         xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);

         /*-----------------------------------------------------------------
          * Switch statement to direct control to appropriate BoxLoop depending
          * on stencil size. Default is full 9-point.
          *-----------------------------------------------------------------*/

         switch (fine_stencil_size)
         {

            /*--------------------------------------------------------------
             * Loop for symmetric 5-point fine grid operator; produces a
             * symmetric 9-point coarse grid operator. We calculate only the
             * lower triangular stencil entries: (southwest, south, southeast,
             * west, and center).
             *--------------------------------------------------------------*/

            case 5:

            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                PT_dbox, cstart, stridec, iP,
                                R_dbox, cstart, stridec, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
               {
                  iAm1 = iA - yOffsetA;
                  iAp1 = iA + yOffsetA;

                  iP1 = iP - yOffsetP - xOffsetP;
                  rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

                  iP1 = iP - yOffsetP;
                  rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
                     + rb[iR] * a_cs[iAm1]
                     + a_cs[iA] * pa[iP1];

                  iP1 = iP - yOffsetP + xOffsetP;
                  rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

                  iP1 = iP - xOffsetP;
                  rap_cw[iAc] = a_cw[iA]
                     + rb[iR] * a_cw[iAm1] * pb[iP1]
                     + ra[iR] * a_cw[iAp1] * pa[iP1];

                  rap_cc[iAc] = a_cc[iA]
                     + rb[iR] * a_cc[iAm1] * pb[iP]
                     + ra[iR] * a_cc[iAp1] * pa[iP]
                     + rb[iR] * a_cn[iAm1]
                     + ra[iR] * a_cs[iAp1]
                     + a_cs[iA] * pb[iP]
                     + a_cn[iA] * pa[iP];
               }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

            /*--------------------------------------------------------------
             * Loop for symmetric 9-point fine grid operator; produces a
             * symmetric 9-point coarse grid operator. We calculate only the
             * lower triangular stencil entries: (southwest, south, southeast,
             * west, and center).
             *--------------------------------------------------------------*/

            default:

            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                PT_dbox, cstart, stridec, iP,
                                R_dbox, cstart, stridec, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
               {
                  iAm1 = iA - yOffsetA;
                  iAp1 = iA + yOffsetA;

                  iP1 = iP - yOffsetP - xOffsetP;
                  rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]
                     + rb[iR] * a_csw[iAm1]
                     + a_csw[iA] * pa[iP1];

                  iP1 = iP - yOffsetP;
                  rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1]
                     + rb[iR] * a_cs[iAm1]
                     + a_cs[iA] * pa[iP1];

                  iP1 = iP - yOffsetP + xOffsetP;
                  rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]
                     + rb[iR] * a_cse[iAm1]
                     + a_cse[iA] * pa[iP1];

                  iP1 = iP - xOffsetP;
                  rap_cw[iAc] = a_cw[iA]
                     + rb[iR] * a_cw[iAm1] * pb[iP1]
                     + ra[iR] * a_cw[iAp1] * pa[iP1]
                     + rb[iR] * a_cnw[iAm1]
                     + ra[iR] * a_csw[iAp1]
                     + a_csw[iA] * pb[iP1]
                     + a_cnw[iA] * pa[iP1];

                  rap_cc[iAc] = a_cc[iA]
                     + rb[iR] * a_cc[iAm1] * pb[iP]
                     + ra[iR] * a_cc[iAp1] * pa[iP]
                     + rb[iR] * a_cn[iAm1]
                     + ra[iR] * a_cs[iAp1]
                     + a_cs[iA] * pb[iP]
                     + a_cn[iA] * pa[iP];
               }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

         } /* end switch statement */

      } /* end ForBoxI */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Builds the strictly upper triangular part of RAP (northeast, north,
 * northwest, east); the mirror of hypre_SMG2BuildRAPSym for full storage.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SMG2BuildRAPNoSym( hypre_StructMatrix *A,
                         hypre_StructMatrix *PT,
                         hypre_StructMatrix *R,
                         hypre_StructMatrix *RAP,
                         hypre_Index         cindex,
                         hypre_Index         cstride )

{

   hypre_Index           index;

   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             fi, ci;

   hypre_Box            *A_dbox;
   hypre_Box            *PT_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   double               *pa, *pb;
   double               *ra, *rb;

   double               *a_cc, *a_cw, *a_ce, *a_cn;
   double               *a_cse, *a_cnw, *a_cne;

   double               *rap_ce, *rap_cn;
   double               *rap_cnw, *rap_cne;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             yOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
      {
         /* advance fi until the fine box matching this coarse box is found */
         while (fgrid_ids[fi] != cgrid_ids[ci])
         {
            fi++;
         }

         cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

         cstart = hypre_BoxIMin(cgrid_box);
         hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

         A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
         PT_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(PT), fi);
         R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
         RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

         /*-----------------------------------------------------------------
          * Extract pointers for interpolation operator:
          * pa is pointer for weight for f-point above c-point
          * pb is pointer for weight for f-point below c-point
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         pa = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

         hypre_SetIndex(index,0,-1,0);
         pb = hypre_StructMatrixExtractPointerByIndex(PT, fi, index);

         /*-----------------------------------------------------------------
          * Extract pointers for restriction operator:
          * ra is pointer for weight for f-point above c-point
          * rb is pointer for weight for f-point below c-point
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

         hypre_SetIndex(index,0,-1,0);
         rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

         /*-----------------------------------------------------------------
          * Extract pointers for 5-point fine grid operator:
          *
          * a_cc is pointer for center coefficient
          * a_cw is pointer for west coefficient
          * a_ce is pointer for east coefficient
          * a_cn is pointer for north coefficient
          * (the south coefficient is not needed for the upper triangular part)
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,0,0);
         a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,-1,0,0);
         a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,1,0,0);
         a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         hypre_SetIndex(index,0,1,0);
         a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

         /*-----------------------------------------------------------------
          * Extract additional pointers for 9-point fine grid operator:
          *
          * a_cse is pointer for southeast coefficient
          * a_cnw is pointer for northwest coefficient
          * a_cne is pointer for northeast coefficient
          * (a_csw is not needed for the upper triangular part)
          *-----------------------------------------------------------------*/

         if(fine_stencil_size > 5)
         {
            hypre_SetIndex(index,1,-1,0);
            a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

            hypre_SetIndex(index,-1,1,0);
            a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

            hypre_SetIndex(index,1,1,0);
            a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
         }

         /*-----------------------------------------------------------------
          * Extract pointers for coarse grid operator - always 9-point:
          *
          * We build only the upper triangular part.
          *
          * rap_ce is pointer for east coefficient (etc.)
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,1,0,0);
         rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,0,1,0);
         rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,1,1,0);
         rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         hypre_SetIndex(index,-1,1,0);
         rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

         /*-----------------------------------------------------------------
          * Define offsets for fine grid stencil and interpolation
          *
          * In the BoxLoop below I assume iA and iP refer to data associated
          * with the point which we are building the stencil for. The below
          * Offsets are used in referring to data associated with other points.
          *-----------------------------------------------------------------*/

         hypre_SetIndex(index,0,1,0);
         yOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
         yOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);
         hypre_SetIndex(index,1,0,0);
         xOffsetP = hypre_BoxOffsetDistance(PT_dbox,index);

         /*-----------------------------------------------------------------
          * Switch statement to direct control to appropriate BoxLoop depending
          * on stencil size. Default is full 9-point (this is the 2D code).
          *-----------------------------------------------------------------*/

         switch (fine_stencil_size)
         {

            /*--------------------------------------------------------------
             * Loop for 5-point fine grid operator; produces upper triangular
             * part of 9-point coarse grid operator - excludes diagonal.
             * stencil entries: (northeast, north, northwest, and east)
             *--------------------------------------------------------------*/

            case 5:

            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                PT_dbox, cstart, stridec, iP,
                                R_dbox, cstart, stridec, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
               {
                  iAm1 = iA - yOffsetA;
                  iAp1 = iA + yOffsetA;

                  iP1 = iP + yOffsetP + xOffsetP;
                  rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];

                  iP1 = iP + yOffsetP;
                  rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
                     + ra[iR] * a_cn[iAp1]
                     + a_cn[iA] * pb[iP1];

                  iP1 = iP + yOffsetP - xOffsetP;
                  rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];

                  iP1 = iP + xOffsetP;
                  rap_ce[iAc] = a_ce[iA]
                     + rb[iR] * a_ce[iAm1] * pb[iP1]
                     + ra[iR] * a_ce[iAp1] * pa[iP1];
               }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

            /*--------------------------------------------------------------
             * Loop for 9-point fine grid operator; produces upper triangular
             * part of 9-point coarse grid operator - excludes diagonal.
             * stencil entries: (northeast, north, northwest, and east)
             *--------------------------------------------------------------*/

            default:

            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop4Begin(hypre_StructMatrixDim(A), loop_size,
                                PT_dbox, cstart, stridec, iP,
                                R_dbox, cstart, stridec, iR,
                                A_dbox, fstart, stridef, iA,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop4For(iP, iR, iA, iAc)
               {
                  iAm1 = iA - yOffsetA;
                  iAp1 = iA + yOffsetA;

                  iP1 = iP + yOffsetP + xOffsetP;
                  rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
                     + ra[iR] * a_cne[iAp1]
                     + a_cne[iA] * pb[iP1];

                  iP1 = iP + yOffsetP;
                  rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1]
                     + ra[iR] * a_cn[iAp1]
                     + a_cn[iA] * pb[iP1];

                  iP1 = iP + yOffsetP - xOffsetP;
                  rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
                     + ra[iR] * a_cnw[iAp1]
                     + a_cnw[iA] * pb[iP1];

                  iP1 = iP + xOffsetP;
                  rap_ce[iAc] = a_ce[iA]
                     + rb[iR] * a_ce[iAm1] * pb[iP1]
                     + ra[iR] * a_ce[iAp1] * pa[iP1]
                     + rb[iR] * a_cne[iAm1]
                     + ra[iR] * a_cse[iAp1]
                     + a_cse[iA] * pb[iP1]
                     + a_cne[iA] * pa[iP1];
               }
            hypre_BoxLoop4End(iP, iR, iA, iAc);

            break;

         } /* end switch statement */

      } /* end ForBoxI */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Collapses stencil in periodic direction on coarsest grid.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SMG2RAPPeriodicSym( hypre_StructMatrix *RAP,
                          hypre_Index         cindex,
                          hypre_Index         cstride )

{
   hypre_Index             index;

   hypre_StructGrid       *cgrid;
   hypre_BoxArray         *cgrid_boxes;
   hypre_Box              *cgrid_box;
   hypre_IndexRef          cstart;
   hypre_Index             stridec;
   hypre_Index             loop_size;

   HYPRE_Int               ci;

   hypre_Box              *RAP_dbox;

   double                 *rap_cc, *rap_cw, *rap_cs;
   double                 *rap_csw, *rap_cse;

   HYPRE_Int               iAc;
   HYPRE_Int               iAcm1;

   HYPRE_Int               xOffset;

   double                  zero = 0.0;

   hypre_SetIndex(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   /* Only act when the grid is periodic in y with period 1; then the
    * south/north couplings wrap onto the point itself and can be folded
    * into the remaining (symmetric-storage) entries. */
   if (hypre_IndexY(hypre_StructGridPeriodic(cgrid)) == 1)
   {
      /* Assemble first so ghost values are current before folding. */
      hypre_StructMatrixAssemble(RAP);

      hypre_ForBoxI(ci, cgrid_boxes)
         {
            cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

            cstart = hypre_BoxIMin(cgrid_box);

            RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

            hypre_SetIndex(index,1,0,0);
            xOffset = hypre_BoxOffsetDistance(RAP_dbox,index);

            /*-----------------------------------------------------------------
             * Extract pointers for coarse grid operator - always 9-point:
             *-----------------------------------------------------------------*/
            hypre_SetIndex(index,0,0,0);
            rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,-1,0,0);
            rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,0,-1,0);
            rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,-1,-1,0);
            rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,1,-1,0);
            rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            /* Fold the south-row couplings into west and center.
             * NOTE(review): rap_cse is read at iAcm1 (one point west) because
             * with symmetric storage it represents the mirrored nw coupling. */
            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop1Begin(hypre_StructMatrixDim(RAP), loop_size,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc,iAcm1) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(iAc)
               {
                  iAcm1 = iAc - xOffset;

                  rap_cw[iAc] += (rap_cse[iAcm1] + rap_csw[iAc]);
                  rap_cc[iAc] += (2.0 * rap_cs[iAc]);
               }
            hypre_BoxLoop1End(iAc);

            /* Zero the collapsed entries in a second sweep (the first sweep
             * still reads neighboring rap_cse values). */
            hypre_BoxLoop1Begin(hypre_StructMatrixDim(RAP), loop_size,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(iAc)
               {
                  rap_csw[iAc] = zero;
                  rap_cs[iAc] = zero;
                  rap_cse[iAc] = zero;
               }
            hypre_BoxLoop1End(iAc);

         } /* end ForBoxI */

   }
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * Collapses stencil in periodic direction on coarsest grid.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SMG2RAPPeriodicNoSym( hypre_StructMatrix *RAP,
                            hypre_Index         cindex,
                            hypre_Index         cstride )

{
   hypre_Index             index;

   hypre_StructGrid       *cgrid;
   hypre_BoxArray         *cgrid_boxes;
   hypre_Box              *cgrid_box;
   hypre_IndexRef          cstart;
   hypre_Index             stridec;
   hypre_Index             loop_size;

   HYPRE_Int               ci;

   hypre_Box              *RAP_dbox;

   double                 *rap_cc, *rap_cw, *rap_cs;
   double                 *rap_csw, *rap_cse;
   double                 *rap_ce, *rap_cn;
   double                 *rap_cnw, *rap_cne;

   HYPRE_Int               iAc;

   double                  zero = 0.0;

   hypre_SetIndex(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   /* Only act when the grid is periodic in y with period 1 (see the
    * symmetric variant above); here all nine entries are stored, so the
    * whole south and north rows are folded locally in one sweep. */
   if (hypre_IndexY(hypre_StructGridPeriodic(cgrid)) == 1)
   {
      hypre_ForBoxI(ci, cgrid_boxes)
         {
            cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

            cstart = hypre_BoxIMin(cgrid_box);

            RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

            /*-----------------------------------------------------------------
             * Extract pointers for coarse grid operator - always 9-point:
             *-----------------------------------------------------------------*/
            hypre_SetIndex(index,0,0,0);
            rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,-1,0,0);
            rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,0,-1,0);
            rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,-1,-1,0);
            rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,1,-1,0);
            rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,1,0,0);
            rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,0,1,0);
            rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,1,1,0);
            rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_SetIndex(index,-1,1,0);
            rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

            hypre_BoxGetSize(cgrid_box, loop_size);
            hypre_BoxLoop1Begin(hypre_StructMatrixDim(RAP), loop_size,
                                RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iAc) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(iAc)
               {
                  rap_cw[iAc] += (rap_cnw[iAc] + rap_csw[iAc]);
                  rap_cnw[iAc] = zero;
                  rap_csw[iAc] = zero;

                  rap_cc[iAc] += (rap_cn[iAc] + rap_cs[iAc]);
                  rap_cn[iAc] = zero;
                  rap_cs[iAc] = zero;

                  rap_ce[iAc] += (rap_cne[iAc] + rap_cse[iAc]);
                  rap_cne[iAc] = zero;
                  rap_cse[iAc] = zero;
               }
            hypre_BoxLoop1End(iAc);

         } /* end ForBoxI */

   }
   return hypre_error_flag;
}
parallel_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Denis Demidov // Philipp Bucher // #if !defined(KRATOS_PARALLEL_UTILITIES_H_INCLUDED) #define KRATOS_PARALLEL_UTILITIES_H_INCLUDED // System includes #include <iostream> #include <array> #include <vector> #include <tuple> #include <cmath> #include <limits> #include <future> #include <thread> // External includes #include <omp.h> // Project includes #include "includes/define.h" #include "includes/global_variables.h" #include "utilities/reduction_utilities.h" namespace Kratos { ///@addtogroup KratosCore //*********************************************************************************** //*********************************************************************************** //*********************************************************************************** /** @param TContainerType - the type of the container used in the loop (must provide random access iterators) * @param TIteratorType - type of iterator (by default as provided by the TContainerType) * @param TMaxThreads - maximum number of threads allowed in the partitioning. 
* must be known at compile time to avoid heap allocations in the partitioning */ template< class TContainerType, class TIteratorType=typename TContainerType::iterator, int TMaxThreads=Globals::MaxAllowedThreads > class BlockPartition { public: /** @param it_begin - iterator pointing at the beginning of the container * @param it_end - iterator pointing to the end of the container * @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads) */ BlockPartition(TIteratorType it_begin, TIteratorType it_end, int Nchunks = omp_get_max_threads()) { KRATOS_ERROR_IF(Nchunks < 1) << "Number of chunks must be > 0 (and not " << Nchunks << ")" << std::endl; const std::ptrdiff_t size_container = it_end-it_begin; if (size_container == 0) { mNchunks = Nchunks; } else { // in case the container is smaller than the number of chunks mNchunks = std::min(static_cast<int>(size_container), Nchunks); } const std::ptrdiff_t block_partition_size = size_container / mNchunks; mBlockPartition[0] = it_begin; mBlockPartition[mNchunks] = it_end; for (int i=1; i<mNchunks; i++) { mBlockPartition[i] = mBlockPartition[i-1] + block_partition_size; } } /** @param rData - the continer to be iterated upon * @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads) */ BlockPartition(TContainerType& rData, int Nchunks = omp_get_max_threads()) : BlockPartition(rData.begin(), rData.end(), Nchunks) {} virtual ~BlockPartition() = default; /** @brief simple iteration loop. f called on every entry in rData * @param f - must be a unary function accepting as input TContainerType::value_type& */ template <class TUnaryFunction> inline void for_each(TUnaryFunction&& f) { #pragma omp parallel for for (int i=0; i<mNchunks; ++i) { for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) { f(*it); //note that we pass the value to the function, not the iterator } } } /** @brief loop allowing reductions. 
f called on every entry in rData * the function f needs to return the values to be used by the reducer * @param TReducer template parameter specifying the reduction operation to be done * @param f - must be a unary function accepting as input TContainerType::value_type& */ template <class TReducer, class TUnaryFunction> inline typename TReducer::value_type for_each(TUnaryFunction &&f) { TReducer global_reducer; #pragma omp parallel for for (int i=0; i<mNchunks; ++i) { TReducer local_reducer; for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) { local_reducer.LocalReduce(f(*it)); } global_reducer.ThreadSafeReduce(local_reducer); } return global_reducer.GetValue(); } /** @brief loop with thread local storage (TLS). f called on every entry in rData * @param TThreadLocalStorage template parameter specifying the thread local storage * @param f - must be a function accepting as input TContainerType::value_type& and the thread local storage */ template <class TThreadLocalStorage, class TFunction> inline void for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f) { static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!"); #pragma omp parallel { // copy the prototype to create the thread local storage TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype); #pragma omp for for(int i=0; i<mNchunks; ++i){ for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it){ f(*it, thread_local_storage); // note that we pass the value to the function, not the iterator } } } } /** @brief loop with thread local storage (TLS) allowing reductions. 
f called on every entry in rData * the function f needs to return the values to be used by the reducer * @param TReducer template parameter specifying the reduction operation to be done * @param TThreadLocalStorage template parameter specifying the thread local storage * @param f - must be a function accepting as input TContainerType::value_type& and the thread local storage */ template <class TReducer, class TThreadLocalStorage, class TFunction> inline typename TReducer::value_type for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f) { static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!"); TReducer global_reducer; #pragma omp parallel { // copy the prototype to create the thread local storage TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype); #pragma omp for for (int i=0; i<mNchunks; ++i) { TReducer local_reducer; for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it) { local_reducer.LocalReduce(f(*it, thread_local_storage)); } global_reducer.ThreadSafeReduce(local_reducer); } } return global_reducer.GetValue(); } private: int mNchunks; std::array<TIteratorType, TMaxThreads> mBlockPartition; }; /** @brief simplified version of the basic loop (without reduction) to enable template type deduction * @param v - containers to be looped upon * @param func - must be a unary function accepting as input TContainerType::value_type& */ template <class TContainerType, class TFunctionType> void block_for_each(TContainerType &&v, TFunctionType &&func) { BlockPartition<typename std::decay<TContainerType>::type>(std::forward<TContainerType>(v)).for_each(std::forward<TFunctionType>(func)); } /** @brief simplified version of the basic loop with reduction to enable template type deduction * @param v - containers to be looped upon * @param func - must be a unary function accepting as input TContainerType::value_type& */ template <class TReducer, class 
TContainerType, class TFunctionType> typename TReducer::value_type block_for_each(TContainerType &&v, TFunctionType &&func) { return BlockPartition<typename std::decay<TContainerType>::type> (std::forward<TContainerType>(v)).template for_each<TReducer>(std::forward<TFunctionType>(func)); } /** @brief simplified version of the basic loop with thread local storage (TLS) to enable template type deduction * @param v - containers to be looped upon * @param tls - thread local storage * @param func - must be a function accepting as input TContainerType::value_type& and the thread local storage */ template <class TContainerType, class TThreadLocalStorage, class TFunctionType> void block_for_each(TContainerType &&v, const TThreadLocalStorage& tls, TFunctionType &&func) { BlockPartition<typename std::decay<TContainerType>::type>(std::forward<TContainerType>(v)).for_each(tls, std::forward<TFunctionType>(func)); } /** @brief simplified version of the basic loop with reduction and thread local storage (TLS) to enable template type deduction * @param v - containers to be looped upon * @param tls - thread local storage * @param func - must be a function accepting as input TContainerType::value_type& and the thread local storage */ template <class TReducer, class TContainerType, class TThreadLocalStorage, class TFunctionType> typename TReducer::value_type block_for_each(TContainerType &&v, const TThreadLocalStorage& tls, TFunctionType &&func) { return BlockPartition<typename std::decay<TContainerType>::type> (std::forward<TContainerType>(v)).template for_each<TReducer>(tls, std::forward<TFunctionType>(func)); } //*********************************************************************************** //*********************************************************************************** //*********************************************************************************** /** @brief This class is useful for index iteration over containers * @param TIndexType type of index to be used in 
the loop * @param TMaxThreads - maximum number of threads allowed in the partitioning. * must be known at compile time to avoid heap allocations in the partitioning */ template<class TIndexType=std::size_t, int TMaxThreads=Globals::MaxAllowedThreads> class IndexPartition { public: /** @brief constructor using the size of the partition to be used * @param Size - the size of the partition * @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads) */ IndexPartition(TIndexType Size, int Nchunks = omp_get_max_threads()) { KRATOS_ERROR_IF(Nchunks < 1) << "Number of chunks must be > 0 (and not " << Nchunks << ")" << std::endl; if (Size == 0) { mNchunks = Nchunks; } else { // in case the container is smaller than the number of chunks mNchunks = std::min(static_cast<int>(Size), Nchunks); } const int block_partition_size = Size / mNchunks; mBlockPartition[0] = 0; mBlockPartition[mNchunks] = Size; for (int i=1; i<mNchunks; i++) { mBlockPartition[i] = mBlockPartition[i-1] + block_partition_size; } } virtual ~IndexPartition() = default; //NOT COMMENTING IN DOXYGEN - THIS SHOULD BE SORT OF HIDDEN UNTIL GIVEN PRIME TIME //pure c++11 version (can handle exceptions) template <class TUnaryFunction> inline void for_pure_c11(TUnaryFunction &&f) { std::vector< std::future<void> > runners(mNchunks); const auto& partition = mBlockPartition; for (int i=0; i<mNchunks; ++i) { runners[i] = std::async(std::launch::async, [&partition, i, &f]() { for (auto k = partition[i]; k < partition[i+1]; ++k) { f(k); } }); } //here we impose a syncronization and we check the exceptions for(int i=0; i<mNchunks; ++i) { try { runners[i].get(); } catch(Exception& e) { KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl; } catch(std::exception& e) { KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl; } catch(...) 
{ KRATOS_ERROR << std::endl << "unknown error" << std::endl; } } } /** simple version of for_each (no reduction) to be called for each index in the partition * @param f - must be a unary function accepting as input IndexType */ template <class TUnaryFunction> inline void for_each(TUnaryFunction &&f) { #pragma omp parallel for for (int i=0; i<mNchunks; ++i) { for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) { f(k); //note that we pass a reference to the value, not the iterator } } } /** version with reduction to be called for each index in the partition * function f is expected to return the values to be reduced * @param TReducer - template parameter specifying the type of reducer to be applied * @param f - must be a unary function accepting as input IndexType */ template <class TReducer, class TUnaryFunction> inline typename TReducer::value_type for_each(TUnaryFunction &&f) { TReducer global_reducer; #pragma omp parallel for for (int i=0; i<mNchunks; ++i) { TReducer local_reducer; for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) { local_reducer.LocalReduce(f(k)); } global_reducer.ThreadSafeReduce(local_reducer); } return global_reducer.GetValue(); } /** @brief loop with thread local storage (TLS). 
f called on every entry in rData * @param TThreadLocalStorage template parameter specifying the thread local storage * @param f - must be a function accepting as input IndexType and the thread local storage */ template <class TThreadLocalStorage, class TFunction> inline void for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f) { static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!"); #pragma omp parallel { // copy the prototype to create the thread local storage TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype); #pragma omp for for (int i=0; i<mNchunks; ++i) { for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) { f(k, thread_local_storage); //note that we pass a reference to the value, not the iterator } } } } /** version with reduction and thread local storage (TLS) to be called for each index in the partition * function f is expected to return the values to be reduced * @param TReducer - template parameter specifying the type of reducer to be applied * @param TThreadLocalStorage template parameter specifying the thread local storage * @param f - must be a function accepting as input IndexType and the thread local storage */ template <class TReducer, class TThreadLocalStorage, class TFunction> inline typename TReducer::value_type for_each(const TThreadLocalStorage& rThreadLocalStoragePrototype, TFunction &&f) { static_assert(std::is_copy_constructible<TThreadLocalStorage>::value, "TThreadLocalStorage must be copy constructible!"); TReducer global_reducer; #pragma omp parallel { // copy the prototype to create the thread local storage TThreadLocalStorage thread_local_storage(rThreadLocalStoragePrototype); #pragma omp for for (int i=0; i<mNchunks; ++i) { TReducer local_reducer; for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k) { local_reducer.LocalReduce(f(k, thread_local_storage)); } 
global_reducer.ThreadSafeReduce(local_reducer); } } return global_reducer.GetValue(); } private: int mNchunks; std::array<TIndexType, TMaxThreads> mBlockPartition; }; } // namespace Kratos. #endif // KRATOS_PARALLEL_UTILITIES_H_INCLUDED defined
timer.h
/* @copyright Russell Standish 2000-2013 @author Russell Standish This file is part of EcoLab Open source licensed under the MIT license. See LICENSE for details. */ #ifndef TIMER_H #define TIMER_H #if !defined(__MINGW32__) && !defined(__MINGW32_VERSION) #include <unistd.h> #include <sys/times.h> #endif #include <stdio.h> #include <iostream> #include <vector> #include <map> #include <string> #include <algorithm> namespace ecolab { struct Times { unsigned long counts; double elapsed, user, system; clock_t start_e, start_u, start_s; bool started; Times(): counts(0), elapsed(0), user(0), system(0), started(false) {} }; /// return the static timer map. The first call to this method /// creates the the map, and is not threadsafe. Using the returned /// map directly is not threadsafe. inline std::map<std::string, Times>& timers() { static std::map<std::string, Times> _timers; return _timers; } /// start the named timer. This call is threadsafe in an OpenMP /// parallel region. inline void start_timer(const std::string& s) { #ifdef _OPENMP #pragma omp critical(ecolab_timers) #endif { #if !defined(__MINGW32__) && !defined(__MINGW32_VERSION) Times& t=timers()[s]; if (!t.started) { t.started = true; struct tms tbuf; t.start_e = times(&tbuf); t.start_u = tbuf.tms_utime; t.start_s = tbuf.tms_stime; } #endif } } /// stop the named timer. This call is threadsafe in an OpenMP /// parallel region. inline void stop_timer(const std::string& s) { #if !defined(__MINGW32__) && !defined(__MINGW32_VERSION) Times& t=timers()[s]; if (!t.started) { t.started = true; struct tms tbuf; t.start_e = times(&tbuf); t.start_u = tbuf.tms_utime; t.start_s = tbuf.tms_stime; } #endif } /// RAII class for timing code blocks. 
class Timer { std::string name; public: Timer(const std::string& name): name(name) {start_timer(name);} ~Timer() {stop_timer(name);} }; struct SortElapsed { bool operator()(const std::pair<std::string, Times>& x, const std::pair<std::string, Times>& y) { return x.second.elapsed < y.second.elapsed; } }; // output the timer values, sorted in elapsed time order inline void print_timers() { std::vector<std::pair<std::string, Times> > times (timers().begin(), timers().end()); std::sort(times.begin(), times.end(), SortElapsed()); // use printf rather than cout, as we may wish to call this from a // static object destructor std::cout << "------------------ Times --------------------------------\n"; for (size_t i=0; i<times.size(); ++i) std::cout << "Elapsed: "<<times[i].second.elapsed<< " User: "<<times[i].second.user<<" System: "<<times[i].second.system << " Counts: "<<times[i].second.counts<<" "<<times[i].first<<std::endl; std::cout << "----------------------------------------------------------"<< std::endl; } } #endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(24*t3+Nx+20,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),128*t4+126),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ 
(-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
main.c
#include <stdio.h> #include <string.h> #include <time.h> #include <stdlib.h> #include <unistd.h> #include <stdbool.h> #include <omp.h> #include <cblas.h> //Threshold for testing validity of matrix matrix multiplication #define ERROR_THRESHOLD 0.0001 //For measuring wall time using omp_get_wtime() static double start; static double end; //Serial version. Do not change this! void serial_mxm(const double *A, const double *B, double *C, int m, int n, int k) { clock_t start, end; start = clock(); for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { C[i * n + j] = 0; for (int l = 0; l < k; l++) { C[i * n + j] += A[i * k + l] * B[l * n + j]; } } } end = clock(); printf("\nUsage: %f", (((double)end - (double)start) / (double)CLOCKS_PER_SEC)); } void omp_mxm(double *A, double *B, double *C, int m, int n, int k) { clock_t start, end; start = clock(); #pragma omp parallell for for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { C[i * n + j] = 0; for (int l = 0; l < k; l++) { C[i * n + j] += A[i * k + l] * B[l * n + j]; } } } end = clock(); printf("\nUsage: %f", (((double)end - (double)start) / (double)CLOCKS_PER_SEC)); } void blas_mxm(double *A, double *B, double *C, int m, int n, int k) { clock_t start, end; start = clock(); cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A, k, B, n, 1.0, C, n); end = clock(); printf("\nUsage: %f", (((double)end - (double)start) / (double)CLOCKS_PER_SEC)); } int main(const unsigned int argc, char **argv) { if (argc <= optind) { printf("Please provide version:\n"); printf("\ts(serial),\n"); printf("\to(penmp) or\n"); printf("\tb(las)\n"); return 0; } char input = argv[optind][0]; optind++; //Simple assumptions that any additional arguments means we want to test the results bool test = !(argc <= optind); int m = 2000; int n = 1000; int k = 200; double *A = (double *)malloc(m * k * sizeof(double)); double *B = (double *)malloc(k * n * sizeof(double)); double *C = (double *)malloc(m * n * sizeof(double)); 
//Intializing matrix data for (int i = 0; i < (m * k); i++) { A[i] = (double)(i + 1); } for (int i = 0; i < (k * n); i++) { B[i] = (double)(-i - 1); } for (int i = 0; i < (m * n); i++) { C[i] = 0.0; } switch (input) { case 's': serial_mxm(A, B, C, m, n, k); break; case 'o': omp_mxm(A, B, C, m, n, k); break; case 'b': blas_mxm(A, B, C, m, n, k); break; default: printf("Please provide version:\n"); printf("\ts(serial),\n"); printf("\to(penmp) or\n"); printf("\tb(las)\n"); return 0; } printf("\nTop left of A:\n"); for (int i = 0; i < 5; i++) { for (int j = 0; j < 5; j++) { printf("%8.2f\t", A[i * k + j]); } printf("\n"); } printf("\nTop left of B:\n"); for (int i = 0; i < 5; i++) { for (int j = 0; j < 5; j++) { printf("%8.2f\t", B[i * n + j]); } printf("\n"); } printf("\nTop left of C:\n"); for (int i = 0; i < 5; i++) { for (int j = 0; j < 5; j++) { printf("%8.2f\t", C[i * n + j]); } printf("\n"); } if (test) { double *C2 = (double *)malloc(m * n * sizeof(double)); serial_mxm(A, B, C2, m, n, k); bool correct = true; for (int i = 0; i < (m * n); i++) { if (abs(C[i] - C2[i]) > ERROR_THRESHOLD) { correct = false; } } if (correct) { printf("\nMatrix multiplication succeeded\n"); } else { printf("\nMatrix multiplication failed\n"); printf("Top left of correct C:\n"); for (int i = 0; i < 5; i++) { for (int j = 0; j < 5; j++) { printf("%8.2f\t", C2[i * n + j]); } printf("\n"); } } } printf("\nVersion: %c, time: %.4f\n", input, end - start); return 0; }
Matrix_Add_ColumnMajor.c
#include<stdio.h> #include<stdlib.h> #include <sys/time.h> double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } #define size 10000 #define NT 8 int A[size][size]; int B[size][size]; int C[size][size]; int flag[size];//to set flag[i]==1 if arr[i] is maximum int main(int argc, char *argv[]){ if(argc!=2){ printf("Usage path-to-executable seedvalue (example usage: ./a.out 3)\n"); exit(0); } srand(atoi(argv[1]));//Seed for random number command line integer value //generates random number for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ A[i][j]=rand()%1048576; B[i][j]=rand()%1048576; } } double t1=rtclock(); #pragma omp parallel for num_threads(8) for(int i=0;i<size;i++) for(int j=0;j<size;j++) C[j][i]=A[j][i]+B[j][i]; double t2=rtclock(); printf("\nTIME =%f \n",(t2-t1)*1000); } /*Run executable-path <integer-seed-value> *example: ./a.out 3 */
myMultiFactorial.c
#include <omp.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Computes N! in parallel: each OpenMP thread accumulates a partial product
 * in its own slot of ress[] (no two threads share a slot, so no race), and
 * the partials are multiplied together sequentially at the end.
 */
int main(int argc, char *argv[])
{
  /* sequential code */
  int i, N = 7, ans, numThreads = omp_get_max_threads();
  int ress[numThreads]; /* one partial product per thread */

  /* Initialize every slot to the multiplicative identity so slots owned by
     threads that run no iterations do not change the final product. */
  for (i = 0; i < numThreads; i++) {
    ress[i] = 1;
  }
  printf("DEBUG NUM THREADS %d\n", numThreads);
  /* sequential code */

  /* The loop starts at 1 so the factor (i+1) ranges over 2..N; the factor
     1 is skipped because it does not change the product. */
  #pragma omp parallel for
  for (i = 1; i < N; i++) {
    /* Each thread multiplies into its own slot only. */
    ress[omp_get_thread_num()] = ress[omp_get_thread_num()] * (i + 1);
    printf("Calculando entrada %d\n", i);
    //sleep(1);
  }

  printf("Integrating answer\n");
  ans = 1;
  /* Combine all partial products (note: this loop starts at 0, covering
     every thread's slot). */
  for (i = 0; i < numThreads; i++) {
    ans = ans * ress[i];
  }
  printf("\nEl resultado es! %d\n", ans);
  return 0;
}
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. 
If the special 'SyncChannels'
%      flag is set all given channels are adjusted in the same way using the
%      mean average of those channels.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;

  MagickStatusType
    status;

  /* log(0.5): the gamma that maps the channel mean to mid-gray. */
  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels, derived
        from the mean of those channels taken together.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately, each from its own mean.
  */
  status = MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* Opacity is only adjusted when the image actually has a matte channel. */
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* The index channel is meaningful only for CMYK images (black channel). */
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set the min/max/mean value of all given channels is used for
%      all given channels, to all channels in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Convenience method for a min/max histogram stretch.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /* Map the contrast percent onto a slope via tan(); contrast 0 gives
     slope 1 (identity), and the slope is clamped to be non-negative. */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  /* Intercept shifts the line so brightness biases toward white or black. */
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  /* Apply the linear map slope*x+intercept as a degree-1 polynomial. */
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%    <ColorCorrection id="cc03345">
%          <SOPNode>
%               <Slope> 0.9 1.2 0.5 </Slope>
%               <Offset> 0.4 -0.5 0.6 </Offset>
%               <Power> 1.0 0.8 1.5 </Power>
%          </SOPNode>
%          <SATNode>
%               <Saturation> 0.85 </Saturation>
%          </SATNode>
%     </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); exception=(&image->exception); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { 
color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MaxTextExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: 
%g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == 
PseudoClass) { /* Apply transfer function to colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+ 0.072186*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); exception=(&image->exception); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 
0 : 1); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); status=InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap, (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == 
CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map); if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % */ static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
*/ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; ExceptionInfo *exception; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) Contrast(sign,&image->colormap[i].red,&image->colormap[i].green, &image->colormap[i].blue); } /* Contrast enhance image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateContrastImage(image,sharpen,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); Contrast(sign,&red,&green,&blue); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by `stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. 
As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. */ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) && 0 /* Call OpenCL version */ status=AccelerateContrastStretchImageChannel(image,channel,black_point, white_point,&image->exception); if (status != MagickFalse) return status; #endif histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) { if (stretch_map != (QuantumPixelPacket *) NULL) stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. 
*/ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > 
black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) 
stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  EnhancePixel() examines the pixel at `r' and, when its mean-weighted
  squared color distance to the center pixel is below the 0.069 threshold,
  folds it into `aggregate' with the given weight.  It always advances `r'
  to the next pixel, so consecutive invocations walk a scanline.  NOTE: as
  a multi-statement macro it relies on the caller providing `mean',
  `distance', `distance_squared', `aggregate', `total_weight', `pixel' and
  `r' in scope.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \
  distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag  "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The 5x5 filter window needs at least a 5x5 image to operate on.
  */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) memset(&zero,0,sizeof(zero));
  image_view=AcquireAuthenticCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read another scan line: a 5-row window centered on y, padded 2 pixels
      on each side so the 5x5 neighborhood is valid at the image borders.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      MagickPixelPacket
        aggregate;

      PixelPacket
        pixel;

      register const PixelPacket
        *magick_restrict r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /* center of the 5x5 window: row 2, column 2 (relative to p) */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      /* 5x5 Gaussian-like weight kernel, one row at a time */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /* divide with rounding (+total_weight/2-1 biases toward nearest) */
          SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
          SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight);
          SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
          SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
            total_weight);
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image)
%      MagickBooleanType EqualizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

/*
  Equalize all default channels; thin convenience wrapper around
  EqualizeImageChannel().
*/
MagickExport MagickBooleanType EqualizeImage(Image *image)
{
  return(EqualizeImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType EqualizeImageChannel(Image *image,
  const ChannelType channel)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    intensity,
    *map,
    white;

  QuantumPixelPacket
    *equalize_map;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Call OpenCL version; fall through to the CPU path only if it declines.
  */
  status=AccelerateEqualizeImage(image,channel,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  /*
    Allocate and initialize histogram arrays.
  */
  equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*equalize_map));
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map));
  if ((equalize_map == (QuantumPixelPacket *) NULL) ||
      (histogram == (MagickPixelPacket *) NULL) ||
      (map == (MagickPixelPacket *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if (map != (MagickPixelPacket *) NULL)
        map=(MagickPixelPacket *) RelinquishMagickMemory(map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      if (equalize_map != (QuantumPixelPacket *) NULL)
        equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(
          equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /*
          Synced channels: bin by pixel intensity; only the .red slot of the
          histogram is used in this mode (shadows the outer `intensity').
        */
        MagickRealType
          intensity=GetPixelIntensity(image,p);

        histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++;
        p++;
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
        if ((channel & GreenChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
        if ((channel & BlueChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
        if ((channel & OpacityChannel) != 0)
          histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++;
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
        p++;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (cumulative
    distribution function per channel).
  */
  (void) memset(&intensity,0,sizeof(intensity));
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        /* synced mode: the CDF lives entirely in the .red slot */
        intensity.red+=histogram[i].red;
        map[i]=intensity;
        continue;
      }
    if ((channel & RedChannel) != 0)
      intensity.red+=histogram[i].red;
    if ((channel & GreenChannel) != 0)
      intensity.green+=histogram[i].green;
    if ((channel & BlueChannel) != 0)
      intensity.blue+=histogram[i].blue;
    if ((channel & OpacityChannel) != 0)
      intensity.opacity+=histogram[i].opacity;
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      intensity.index+=histogram[i].index;
    map[i]=intensity;
  }
  black=map[0];
  white=map[(int) MaxMap];
  (void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map));
  /*
    Normalize the CDF into a quantum lookup table; a channel with a flat
    CDF (white == black) is left unmapped.
  */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    if ((channel & SyncChannels) != 0)
      {
        if (white.red != black.red)
          equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
            (map[i].red-black.red))/(white.red-black.red)));
        continue;
      }
    if (((channel & RedChannel) != 0) && (white.red != black.red))
      equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].red-black.red))/(white.red-black.red)));
    if (((channel & GreenChannel) != 0) && (white.green != black.green))
      equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].green-black.green))/(white.green-black.green)));
    if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
      equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].blue-black.blue))/(white.blue-black.blue)));
    if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity))
      equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].opacity-black.opacity))/(white.opacity-black.opacity)));
    if ((((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace)) &&
        (white.index != black.index))
      equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap*
        (map[i].index-black.index))/(white.index-black.index)));
  }
  histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
  map=(MagickPixelPacket *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      /*
        Equalize colormap.  NOTE(review): in the SyncChannels branch all
        colormap components are remapped through the .red slot of
        equalize_map, matching the intensity-based CDF above.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & SyncChannels) != 0)
          {
            if (white.red != black.red)
              {
                image->colormap[i].red=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].red)].red;
                image->colormap[i].green=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].green)].red;
                image->colormap[i].blue=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].blue)].red;
                image->colormap[i].opacity=equalize_map[
                  ScaleQuantumToMap(image->colormap[i].opacity)].red;
              }
            continue;
          }
        if (((channel & RedChannel) != 0) && (white.red != black.red))
          image->colormap[i].red=equalize_map[
            ScaleQuantumToMap(image->colormap[i].red)].red;
        if (((channel & GreenChannel) != 0) && (white.green != black.green))
          image->colormap[i].green=equalize_map[
            ScaleQuantumToMap(image->colormap[i].green)].green;
        if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
          image->colormap[i].blue=equalize_map[
            ScaleQuantumToMap(image->colormap[i].blue)].blue;
        if (((channel & OpacityChannel) != 0) &&
            (white.opacity != black.opacity))
          image->colormap[i].opacity=equalize_map[
            ScaleQuantumToMap(image->colormap[i].opacity)].opacity;
      }
    }
  /*
    Equalize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & SyncChannels) != 0)
        {
          if (white.red != black.red)
            {
              /* synced mode: every component uses the .red map */
              SetPixelRed(q,equalize_map[
                ScaleQuantumToMap(GetPixelRed(q))].red);
              SetPixelGreen(q,equalize_map[
                ScaleQuantumToMap(GetPixelGreen(q))].red);
              SetPixelBlue(q,equalize_map[
                ScaleQuantumToMap(GetPixelBlue(q))].red);
              SetPixelOpacity(q,equalize_map[
                ScaleQuantumToMap(GetPixelOpacity(q))].red);
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(indexes+x,equalize_map[
                  ScaleQuantumToMap(GetPixelIndex(indexes+x))].red);
            }
          q++;
          continue;
        }
      if (((channel & RedChannel) != 0) && (white.red != black.red))
        SetPixelRed(q,equalize_map[
          ScaleQuantumToMap(GetPixelRed(q))].red);
      if (((channel & GreenChannel) != 0) && (white.green != black.green))
        SetPixelGreen(q,equalize_map[
          ScaleQuantumToMap(GetPixelGreen(q))].green);
      if (((channel & BlueChannel) != 0) && (white.blue != black.blue))
        SetPixelBlue(q,equalize_map[
          ScaleQuantumToMap(GetPixelBlue(q))].blue);
      if (((channel & OpacityChannel) != 0) &&
          (white.opacity != black.opacity))
        SetPixelOpacity(q,equalize_map[
          ScaleQuantumToMap(GetPixelOpacity(q))].opacity);
      if ((((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace)) &&
          (white.index != black.index))
        SetPixelIndex(indexes+x,equalize_map[
          ScaleQuantumToMap(GetPixelIndex(indexes+x))].index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a m m a I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GammaImage() gamma-corrects a particular image channel.  The same
%  image viewed on different devices will have perceptual differences in the
%  way the image's intensities are represented on the screen.  Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const char *level)
%      MagickBooleanType GammaImageChannel(Image *image,
%        const ChannelType channel,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/

/*
  pow() with negative bases is undefined for non-integer exponents; pass
  negative values (possible with HDRI pixels) through unchanged.
*/
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  /*
    Parse "rho[,sigma[,xi]]" as red[,green[,blue]] gamma; missing values
    default to the red gamma.
  */
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    gamma.green=gamma.red;
  gamma.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    gamma.blue=gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status&=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* gamma == 0.0 leaves the map all zeros, zeroing the channel */
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,
        PerceptibleReciprocal(gamma)))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        /* integer quanta: use the precomputed lookup table */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        /* HDRI quanta: compute directly (may be out of LUT range) */
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,PerceptibleReciprocal(gamma));
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,PerceptibleReciprocal(gamma));
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,PerceptibleReciprocal(gamma));
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,PerceptibleReciprocal(gamma));
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            PerceptibleReciprocal(gamma)));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            PerceptibleReciprocal(gamma)));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            PerceptibleReciprocal(gamma)));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),PerceptibleReciprocal(gamma)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),PerceptibleReciprocal(gamma)));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),PerceptibleReciprocal(gamma)));
            }
        }
#endif
      q++;
    }
    /* CMYK black channel is stored in the index queue, handled separately */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* track the cumulative gamma applied to the image */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method used to reduce RGB to gray.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Colormapped image: resolve the colormap into pixels first so the
        per-pixel loop below can operate on DirectClass data.
      */
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
  */
  /* call opencl version */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* If the OpenCL path succeeds, it fully replaces the CPU loop below. */
  if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      /* Luminance methods are linear; record a linear gray colorspace. */
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace));
      return(SetImageColorspace(image,GRAYColorspace));
    }
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max channels. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squared channels, normalized back into quantum range. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma is defined on gamma-encoded (sRGB) values. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance is defined on linear-light values. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root-mean-square of the channels. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; update atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace));
  return(SetImageColorspace(image,GRAYColorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image)
%      MagickBooleanType HaldClutImageChannel(Image *image,
%        const ChannelType channel,Image *hald_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
% */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
*/ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double area, offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; area=point.y; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.y < 0.5) ? 
0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel3); offset+=cube_size; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); if (status == MagickFalse) break; status=InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); if (status == MagickFalse) break; MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,area,&pixel4); area=point.z; if (hald_image->interpolate == NearestNeighborInterpolatePixel) area=(point.z < 0.5)? 0.0 : 1.0; MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,area,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of 
a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black, and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        FIX: percent arguments are documented as percentages of the quantum
        range (e.g. "-level 10%,90%" => 0.1*QuantumRange..0.9*QuantumRange).
        The previous code scaled by image->columns*image->rows/100.0 -- a
        pixel *count* (as used for histogram thresholds elsewhere) -- which
        produced level points wildly outside the quantum range for any
        non-trivial image size.
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  /* With a single value, mirror the black point about the range midpoint. */
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  /* The '!' flag (AspectValue) selects the inverse (levelize) operation. */
  if ((flags & AspectValue ) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
%  values.  Gamma correction is also applied after the values has been mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized.  or by swapping black and white values, negate the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

/*
  Map one sample: stretch [black_point,white_point] onto the full quantum
  range, then apply inverse-gamma.  The result is NOT clamped here; callers
  clamp via ClampToQuantum().
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  /* PerceptibleReciprocal() guards against white_point == black_point. */
  scale=PerceptibleReciprocal(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/
    gamma);
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is leveled in alpha space (QuantumRange-x)
        so that "black"/"white" keep their intuitive meaning for alpha.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      /* Pixel path levels alpha directly (cf. colormap path above). */
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; update atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma
%  is applied before the values are mapped.
%
%  LevelizeImageChannel() can be called with by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used for example de-contrast a greyscale image to the exact
%  levels specified.  Or by using specific levels for each channel of an image
%  you can convert a gray-scale image to any linear color gradient, according
%  to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/

MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: levelize the default channels. */
  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Inverse of LevelPixel(): apply gamma to the normalized sample, then
  compress the full range into [black_point,white_point].
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.  Opacity is handled in alpha space (QuantumRange-x),
        matching the LevelImageChannel() colormap path.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; update atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() maps the given color to "black" and "white" values,
%  linearly spreading out the
colors, and level values on a channel by channel
%  basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction.  That is any existing "black" and "white" colors in
%  the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient into
%  the given color gradient.
%
%  The format of the LevelColorsImageChannel method is:
%
%      MagickBooleanType LevelColorsImage(Image *image,
%        const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%      MagickBooleanType LevelColorsImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from
%
%    o white_point: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
*/

MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  /* Convenience wrapper: level colors on the default channels. */
  status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert);
  return(status);
}

MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    NOTE(review): this guard tests the given colors with '!= MagickFalse'
    (i.e. converts the gray image to sRGB when the target colors ARE gray).
    It seems it should be '== MagickFalse' -- convert only when a target
    color is NOT gray -- confirm against upstream before relying on it.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) != MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) != MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /* Map the given colors TO black/white, one channel at a time. */
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      /* Map black/white TO the given colors (inverse/levelize direction). */
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point (a pixel COUNT threshold, not a level).
%
%    o white_point: the white point (a pixel COUNT threshold, not a level).
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,
        p)))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point
    levels: accumulate counts inward from each end until the thresholds
    are reached.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  /* Stretch the located levels over the full quantum range (gamma 1.0). */
  status=LevelImageChannel(image,DefaultChannels,(double)
    ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chrome, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
*/

/*
  Each Modulate* helper below converts one RGB triple to the named
  colorspace, scales the two magnitude components by their percentages,
  rotates hue by (percent_hue-100)/200 of a revolution, and converts back.
*/

static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}

static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Increase or decrease color blackness, whiteness, or hue.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  blackness*=0.01*percent_blackness;
  whiteness*=0.01*percent_whiteness;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}

static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}

static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}

MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /* Parse "brightness[,saturation[,hue]]"; missing terms default to 100%. */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  /* The working colorspace may be overridden via an image artifact. */
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      Quantum
        blue,
        green,
        red;

      /*
        Modulate image colormap.
        NOTE(review): this switch maps LCHColorspace to ModulateLCHab while
        the per-pixel switch below maps it to ModulateLCHuv -- PseudoClass
        and DirectClass images would modulate differently; confirm which
        alias is intended.
      */
      red=image->colormap[i].red;
      green=image->colormap[i].green;
      blue=image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        case LCHColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
  /* call opencl version */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        blue,
        green,
        red;

      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      /*
        NOTE(review): unlike the colormap switch above, this switch has no
        HSIColorspace case (HSI falls through to the HSL default) and groups
        LCHColorspace with LCHuv rather than LCHab -- confirm intended.
      */
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. 
% Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. 
*/ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 
1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; register ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
GB_unop__identity_fc32_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_fc32_int64)
// op(A') function: GB (_unop_tran__identity_fc32_int64)

// C type:   GxB_FC32_t
// A type:   int64_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: int64_t -> single-precision complex with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise apply: Cx [p] = (GxB_FC32_t) Ax [p] for all p, parallelized with
// OpenMP.  Handles both the full/sparse case (Ab == NULL) and the bitmap case
// (Ab [p] selects which entries are present).
GrB_Info GB (_unop_apply__identity_fc32_int64)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every slot of Ax holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c, which expands using
// the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__identity_fc32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SUGE.c
/**
 * Name: SUGE.c v2.3.1
 * Date: 2021-09-12
 * Intro: A lib to simulate astronomical phenomenons.
 * Update: Allows two colors to display clearly, now supports v4.
 * Open configure file SUGE by default, which can be appointed by cmd argument:
 *   ver: 2/3/4
 *   number: Numbers of celestial bodies
 *   X position (a list of length number)
 *   Y position (a list of length number)
 *   X velocity (a list of length number)
 *   Y velocity (a list of length number)
 *   Quality (a list of length number)
 *   [Added: v3] Width (a list of length number)
 *   [Deprecated: v4] Foreground color (a list of length number in hex BBGGRR)
 *   [Added: v4] Foreground color #1
 *   [Added: v4] Foreground color #2
 *   Background color
 *   Simulate accuracy
 *   Display frequent (ticks in each frame)
 *   [Deprecated: v3] Width
 *   [Added: v4] Switch (frames in each switching colors)
 *   X/Y offset
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <windows.h>
#define TITLE "SUGE"
// Read by gdiTemplate.c when painting the window background.
COLORREF BACKGROUND_COLOR;
#include "gdiTemplate.c"
#ifdef _SIMULATOR_USE_OMP
#include <omp.h>
#endif
// NOTE(review): `i` is a file-scope loop counter shared by every loop below;
// safe only because the program is effectively single-threaded outside the
// OpenMP region (where the loop variable is predetermined private).
int version, number, displayFreq, switchTicks = 1, offset[2], ticks, i, parity;
double simulateAcc;
LPSTR fileName = "SUGE";

// Worker thread: parses the configuration file, then runs the pairwise-gravity
// simulation forever, drawing each body's trail on the window's device context.
// lpParamter is the HWND of the window created by gdiTemplate.c.
DWORD WINAPI threadProc(LPVOID lpParamter) {
	// NOTE(review): fopen result is not checked; a missing config file makes
	// the first fscanf dereference NULL.  fscanf returns are unchecked too.
	FILE *file = fopen(fileName, "r");
	HWND hwnd = (HWND)lpParamter;
	fscanf(file, "%d%d", &version, &number);
	// NOTE(review): VLAs sized by `number` read straight from the file; a
	// large or negative value overflows the stack / is UB.  TODO: validate.
	HDC hdc[number][2];
	HPEN hpen[number][2];
	double position[number][2], velocity[number][2], quality[number];
	int width[number];
	COLORREF color[number][2];
	if(version==2 || version==3 || version==4) {
		for(i=0; i<number; ++i) fscanf(file, "%lf", &position[i][0]);
		for(i=0; i<number; ++i) fscanf(file, "%lf", &position[i][1]);
		for(i=0; i<number; ++i) fscanf(file, "%lf", &velocity[i][0]);
		for(i=0; i<number; ++i) fscanf(file, "%lf", &velocity[i][1]);
		for(i=0; i<number; ++i) fscanf(file, "%lf", &quality[i]);
		// Per-body widths only exist from v3 on; v2 reads one global width later.
		if(version >= 3) for(i=0; i<number; ++i) fscanf(file, "%d", &width[i]);
		for(i=0; i<number; ++i) fscanf(file, "%lx", &color[i][0]);
		// Before v4 each body has a single color; duplicate it into slot #2 so
		// the two-DC drawing code below works uniformly for all versions.
		if(version < 4) for(i=0; i<number; ++i) color[i][1] = color[i][0];
		else for(i=0; i<number; ++i) fscanf(file, "%lx", &color[i][1]);
		fscanf(file, "%lx %lf %d", &BACKGROUND_COLOR, &simulateAcc, &displayFreq);
		if(version == 2) {
			// v2: single global width, broadcast to every body.
			fscanf(file, "%d", &width[0]);
			for(i=0; i<number; ++i) width[i] = width[0];
		} else if(version == 4) {
			// v4: frames per color switch (defaults to 1 when absent).
			fscanf(file, "%d", &switchTicks);
		}
		fscanf(file, "%d %d", &offset[0], &offset[1]);
	} else {
		printf("Version not support: %d", version);
		return 1;
	}
	fclose(file);
	// Rescale to simulation units: positions are shifted by the screen offset,
	// velocities by one factor of simulateAcc and masses by two, so one tick
	// of the Euler integration below corresponds to simulateAcc time units.
	for(i=0; i<number; ++i) {
		position[i][0] += offset[0];
		position[i][1] += offset[1];
		velocity[i][0] *= simulateAcc;
		velocity[i][1] *= simulateAcc;
		quality[i] *= simulateAcc * simulateAcc;
	}
	// Convert "frames per switch" into "ticks per switch".
	switchTicks *= displayFreq;
	// Two DC/pen pairs per body: one per color, switched by `parity` below.
	// NOTE(review): these GDI objects are never released/deleted; acceptable
	// only because the simulation loop below never exits.
	for(i=0; i<number; ++i) {
		int j;
		for(j=0; j<2; ++j) {
			hdc[i][j] = GetDC(hwnd);
			hpen[i][j] = CreatePen(PS_SOLID, width[i], color[i][j]);
			SelectObject(hdc[i][j], hpen[i][j]);
			MoveToEx(hdc[i][j], position[i][0], position[i][1], 0);
		}
	}
	// Main loop: O(n^2) pairwise gravity (each unordered pair visited once via
	// j < i, applying equal and opposite accelerations), then draw, then a
	// forward-Euler position update.  Runs forever.
	while(1) {
#ifdef _SIMULATOR_USE_OMP
// NOTE(review): with OpenMP enabled, different i-iterations update the same
// velocity[j] concurrently — a data race.  TODO: confirm intent; needs
// atomics or a reduction to be correct in parallel.
#pragma omp parallel for
#endif
		for(i=0; i<number; ++i) {
			int j;
			for(j=0; j<i; ++j) {
				double distance_neg3, differenceX, differenceY, quality_j_distance_neg3, quality_i_distance_neg3;
				differenceX = position[j][0] - position[i][0];
				differenceY = position[j][1] - position[i][1];
				// Coincident bodies: skip to avoid division by zero.
				if(differenceX==0 && differenceY==0) continue;
				// 1/r^3; multiplying by the displacement vector gives the
				// inverse-square force direction-scaled components.
				distance_neg3 = pow(differenceX * differenceX + differenceY * differenceY, -1.5);
				quality_j_distance_neg3 = quality[j] * distance_neg3;
				quality_i_distance_neg3 = quality[i] * distance_neg3;
				velocity[i][0] += differenceX * quality_j_distance_neg3;
				velocity[i][1] += differenceY * quality_j_distance_neg3;
				velocity[j][0] -= differenceX * quality_i_distance_neg3;
				velocity[j][1] -= differenceY * quality_i_distance_neg3;
			}
		}
		// Draw one frame every displayFreq ticks; `parity` selects which of the
		// two colors is the active pen, alternating every switchTicks ticks.
		// The inactive DC's current point is kept in sync so the trail is
		// continuous when the colors swap.
		if(ticks % displayFreq == 0) {
			parity = (ticks / switchTicks) & 1;
			for(i=0; i<number; ++i) {
				LineTo(hdc[i][parity], position[i][0], position[i][1]);
				MoveToEx(hdc[i][parity^1], position[i][0], position[i][1], 0);
			}
		}
		// Forward-Euler integration step (velocity already includes the dt
		// factor via the simulateAcc rescaling above).
		for(i=0; i<number; ++i) {
			position[i][0] += velocity[i][0];
			position[i][1] += velocity[i][1];
		}
		++ticks;
	}
}

// Hook called by gdiTemplate.c at startup: an optional command-line argument
// overrides the default configuration file name.
// NOTE(review): strlen is used without #include <string.h>; this presumably
// relies on windows.h pulling it in transitively — verify on the target
// toolchain.
int WINAPI initProc(HINSTANCE hInstance, HINSTANCE prevInstance, LPSTR pCmdLine, int nCmdShow) {
	if(strlen(pCmdLine) != 0) {
		fileName = pCmdLine;
	}
	return 0;
}
GB_dense_subassign_22_template.c
//------------------------------------------------------------------------------ // GB_dense_subassign_22_template: C += b where C is dense and b is a scalar //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; const int64_t cnz = GB_nnz (C) ; ASSERT (!C->iso) ; //-------------------------------------------------------------------------- // C += b where C is dense and b is a scalar //-------------------------------------------------------------------------- int64_t pC ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (pC = 0 ; pC < cnz ; pC++) { GB_BINOP (GB_CX (pC), GB_CX (pC), bwork, 0, 0) ; } }
GB_binop__bor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int32) // A.*B function (eWiseMult): GB (_AemultB_08__bor_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int32) // A.*B function (eWiseMult): GB (_AemultB_04__bor_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int32) // C=scalar+B GB (_bind1st__bor_int32) // C=scalar+B' GB (_bind1st_tran__bor_int32) // C=A+scalar GB (_bind2nd__bor_int32) // C=A'+scalar GB (_bind2nd_tran__bor_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT32 || GxB_NO_BOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = 
(int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bor_int32) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Example_target_task_reduction.2b.c
/* * @@name: target_task_reduction.2b.c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success * @@version: omp_5.0 */ #include <stdio.h> #pragma omp declare target to(device_compute) extern void device_compute(int *); extern void host_compute(int *); int main() { int sum = 0; #pragma omp parallel master reduction(task, +:sum) { #pragma omp target in_reduction(+:sum) nowait device_compute(&sum); host_compute(&sum); } printf( "sum = %d\n", sum); //OUTPUT: sum = 2 return 0; } void device_compute(int *sum){ *sum = 1; } void host_compute(int *sum){ *sum = 1; }
convolution-2d.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for private(j) collapse(2) schedule(static) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
shared-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Demonstrates default(none): any variable used in a parallel region that
 * is not covered by an explicit data-sharing clause (private, shared,
 * firstprivate, lastprivate, reduction, ...) causes a compiler error.
 * That is why 'a' and 'n' are listed explicitly below — with the clause
 * commented out ("default(none)//shared(a)") the example would not even
 * compile under OpenMP.
 */
int main(void)
{
  int i, n = 7;
  int a[n]; /* VLA sized by n */

  for (i = 0; i < n; i++)
    a[i] = i + 1;

  /* 'i' is the loop iteration variable and is predetermined private;
   * 'a' is written element-wise by the threads (disjoint indices) and
   * 'n' is only read, so both are shared. */
#pragma omp parallel for default(none) shared(a, n)
  for (i = 0; i < n; i++)
    a[i] += i;

  printf("Después de parallel for:\n");
  for (i = 0; i < n; i++)
    printf("a[%d] = %d\n", i, a[i]);

  return 0;
}
ParSHUM_solver.c
#define _GNU_SOURCE #include <stdlib.h> #include <stdio.h> #include <string.h> #include <libgen.h> #include <math.h> #include <limits.h> #ifdef USE_PLASMA #include <plasma.h> #else #include <mkl.h> #endif #include "ParSHUM_verbose.h" #include "ParSHUM_enum.h" #include "ParSHUM_matrix.h" #include "ParSHUM_dense.h" #include "ParSHUM_schur_matrix.h" #include "ParSHUM_pivot_list.h" #include "ParSHUM_paje.h" #include "ParSHUM_auxiliary.h" #include "ParSHUM_solver.h" const char *usageStrign[] = { "usage test: [--help] [--matrix matrix] [--RHS_file file] [--debug_mode] [--verbosity level] [--marko_tol tol] [--value_tol tol]", " [--extra_space factor] [--extra_space_inbetween factor] [--nb_threads #threads] [--nb_candidates_per_block #blocks] ", " [--output_dir dir] [--output_file file] [--nb_previous_pivots #pivtos] [--schur_density_tolerance tol]", " [--min_pivot_per_steps #steps] [--prog_name name ] [--check_schur_symetry] [--check_schur_memory] [--check_pivots]", " [--check_ParSHUM_with_plasma_perm] [--check_dense_with_ParSHUM_perm] [--print_each_step] [--check_GC]", " [--group_run value_tol|marko_tol|schur_density|nb_candidates|min_pivots|nb_threads init inc nb_steps]", " [--counters_size #double_counters] [--check_counters] [--check_schur_doubles] [--max_dense_schur size]", " [--luby_algorithm] [--singeltons_relaxation tol] [--trace]", NULL, }; #ifdef USE_PLASMA int is_plasma_init; #endif int ParSHUM_solver_run_group(ParSHUM_solver solver, ParSHUM_parm_type type, void *init_val, int nb_steps, void *inc); ParSHUM_solver ParSHUM_solver_create() { ParSHUM_solver self = calloc(1, sizeof(*self)); self->exe_parms = calloc(1, sizeof(*self->exe_parms)); self->verbosity = 1; self->size_counters = 100; self->exe_parms->nb_threads = 1; self->exe_parms->value_tol = 0.1; self->exe_parms->singeltons_relaxation = 0.01; self->exe_parms->marko_tol = 4; self->exe_parms->extra_space = 1.0; self->exe_parms->extra_space_inbetween = 1.0; self->exe_parms->nb_candidates_per_block = 10; 
self->exe_parms->nb_previous_pivots = 5; self->exe_parms->min_pivot_per_steps = 5; self->exe_parms->density_tolerance = 0.2; self->exe_parms->max_dense_schur = 20000; self->exe_parms->luby_algo = 1; self->verbose = ParSHUM_verbose_create(self->exe_parms); return self; } void ParSHUM_solver_dealloc(ParSHUM_solver self) { ParSHUM_verbose_destroy(self->verbose); free(self->exe_parms); free(self); } int check_ParSHUM_with_plasma_perm(int argc, char **argv) { ParSHUM_solver plasma; ParSHUM_vector X, sol_plasma, sol_ParSHUM; plasma = ParSHUM_solver_create(); ParSHUM_solver_parse_args(plasma, argc, argv, 1); plasma->exe_parms->density_tolerance = 1.0; plasma->exe_parms->min_pivot_per_steps = 5; plasma->exe_parms->nb_previous_pivots = 5; plasma->debug |= ParSHUM_CHECK_ParSHUM_W_PLASMA_PERM; ParSHUM_solver_read_matrix(plasma); ParSHUM_solver_init(plasma); X = ParSHUM_vector_create(plasma->A->n); sol_plasma = ParSHUM_vector_create(plasma->A->n); sol_ParSHUM = ParSHUM_vector_create(plasma->A->n); ParSHUM_vector_read_file(X, plasma->exe_parms->RHS_file); ParSHUM_vector_copy(X, sol_plasma); ParSHUM_solver_factorize(plasma); ParSHUM_solver_solve(plasma, sol_plasma); ParSHUM_solver_compute_norms(plasma, X, sol_plasma); ParSHUM_solver_finalize(plasma); /* apply plasma row permutation to A */ int *plasma_perms = ParSHUM_dense_get_row_perms(plasma->S_dense, plasma->row_perm); ParSHUM_matrix debug_matrix = ParSHUM_matrix_permute(plasma->A, plasma->col_perm, plasma_perms); ParSHUM_solver debug_solver = ParSHUM_solver_create(); debug_solver->A = debug_matrix; debug_solver->debug |= ParSHUM_CHECK_ParSHUM_W_PLASMA_PERM; debug_solver->exe_parms->density_tolerance = 1.0; debug_solver->exe_parms->min_pivot_per_steps = 5; debug_solver->exe_parms->nb_previous_pivots = 5; ParSHUM_solver_init(debug_solver); ParSHUM_vector_permute(X, plasma_perms, X->n); ParSHUM_vector_copy(X, sol_ParSHUM); ParSHUM_solver_factorize(debug_solver); ParSHUM_solver_solve(debug_solver, sol_ParSHUM); 
ParSHUM_solver_compute_norms(debug_solver, X, sol_ParSHUM); ParSHUM_solver_finalize(debug_solver); free(plasma_perms); ParSHUM_vector_destroy(X); ParSHUM_vector_destroy(sol_plasma); ParSHUM_vector_destroy(sol_ParSHUM); ParSHUM_solver_destroy(debug_solver); ParSHUM_solver_destroy(plasma); return 0; } int check_dense_with_ParSHUM_perm(int argc, char **argv) { ParSHUM_solver self; ParSHUM_vector X, sol_ParSHUM, sol_dense; self = ParSHUM_solver_create(); ParSHUM_solver_parse_args(self, argc, argv, 1); self->exe_parms->density_tolerance = 1.0; self->exe_parms->min_pivot_per_steps = 5; self->exe_parms->nb_previous_pivots = 5; ParSHUM_solver_read_matrix(self); ParSHUM_solver_init(self); X = ParSHUM_vector_create(self->A->n); sol_ParSHUM = ParSHUM_vector_create(self->A->n); sol_dense = ParSHUM_vector_create(self->A->n); ParSHUM_vector_read_file(X, self->exe_parms->RHS_file); ParSHUM_vector_copy(X, sol_ParSHUM); ParSHUM_vector_copy(X, sol_dense); ParSHUM_solver_factorize(self); ParSHUM_solver_solve(self, sol_ParSHUM); ParSHUM_solver_compute_norms(self, X, sol_ParSHUM); ParSHUM_solver_finalize(self); ParSHUM_solver dense_solver = ParSHUM_solver_create(); ParSHUM_solver_parse_args(dense_solver, argc, argv, 1); dense_solver->A = self->A; dense_solver->debug |= ParSHUM_CHECK_DENSE_W_ParSHUM_PERM; dense_solver->row_perm = self->row_perm; dense_solver->col_perm = self->col_perm; dense_solver->invr_row_perm = self->invr_row_perm; dense_solver->invr_col_perm = self->invr_col_perm; dense_solver->verbose->parms->prog_name = "Dense solver"; ParSHUM_solver_init(dense_solver); ParSHUM_solver_factorize(dense_solver); ParSHUM_solver_solve(dense_solver, sol_dense); ParSHUM_solver_compute_norms(dense_solver, X, sol_dense); ParSHUM_solver_finalize(dense_solver); ParSHUM_vector_destroy(X); ParSHUM_vector_destroy(sol_ParSHUM); ParSHUM_vector_destroy(sol_dense); ParSHUM_solver_destroy(dense_solver); ParSHUM_solver_destroy(self); return 0; } void ParSHUM_solver_parse_args(ParSHUM_solver self, 
int argc, char **argv, int exit_on_notFound) { int i, run_args_start = 0; for (i = 1; i < argc; i++) { if (!strcmp(argv[i], "--verbosity")) { self->verbose->parms->verbosity = atoi(argv[++i]); self->verbosity = atoi(argv[i]); continue; } else if (!strcmp(argv[i], "--matrix")) { self->exe_parms->matrix_file = argv[++i]; continue; } else if (!strcmp(argv[i], "--RHS_file")) { self->exe_parms->RHS_file = argv[++i]; continue; } else if (!strcmp(argv[i], "--value_tol")) { double tmp = atof(argv[++i]); if ( tmp > 1.0 || tmp < 0.0) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "value tolerance should be between 0 and 1"); self->exe_parms->value_tol = tmp; continue; } else if (!strcmp(argv[i], "--marko_tol")) { double tmp = atof(argv[++i]); if ( tmp < 1.0 ) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "marko tolerance should be larger then 1"); self->exe_parms->marko_tol = tmp; continue; } else if (!strcmp(argv[i], "--nb_threads")) { int tmp = atoi(argv[++i]); self->exe_parms->nb_threads = tmp; continue; } else if (!strcmp(argv[i], "--extra_space")) { double tmp = atof(argv[++i]); self->exe_parms->extra_space = tmp; continue; } else if (!strcmp(argv[i], "--extra_space_inbetween")) { double tmp = atof(argv[++i]); self->exe_parms->extra_space_inbetween = tmp; continue; } else if (!strcmp(argv[i], "--nb_candidates_per_block")) { int tmp = atoi(argv[++i]); if (tmp < 1) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "nb_candidates_per_blocks should be at least 1"); self->exe_parms->nb_candidates_per_block = tmp; continue; } else if (!strcmp(argv[i], "--prog_name")) { self->verbose->parms->prog_name = argv[++i]; continue; } else if (!strcmp(argv[i], "--output_dir")) { self->verbose->parms->output_dir = argv[++i]; self->verbose->parms->user_out_dir = 1; continue; } else if (!strcmp(argv[i], "--output_file")) { FILE *file = fopen(argv[++i], "w+"); if ( file ) { self->verbose->parms->out_file = file; self->verbose->parms->user_out_file = 1; } else { 
ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__,"unable to open output file, the program will write on stdout instead"); file = stdout; } continue; } else if (!strcmp(argv[i], "--nb_previous_pivots")) { int tmp = atoi(argv[++i]); if (tmp < 1) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "nb_previous_pivots should be at least 1"); self->exe_parms->nb_previous_pivots = tmp; continue; } else if (!strcmp(argv[i], "--schur_density_tolerance")) { double tmp = atof(argv[++i]); if ( tmp > 1.0 ) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "schur density tolerance can not be larger then 1"); if ( tmp < 0.0 ) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "schur density tolerance can not be smaller then 0"); self->exe_parms->density_tolerance = tmp; continue; } else if (!strcmp(argv[i], "--min_pivot_per_steps")) { int tmp = atoi(argv[++i]); if ( tmp < self->exe_parms->nb_previous_pivots ) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "min_pivot_per_steps should be at least nb_previous_pivots"); self->exe_parms->min_pivot_per_steps = tmp; continue; } else if (!strcmp(argv[i], "--debug_mode")) { ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__,"debug mode is not implemented"); continue; } else if (!strcmp(argv[i], "--check_pivots")) { self->debug |= ParSHUM_CHECK_PIVOTS; continue; } else if (!strcmp(argv[i], "--check_schur_memory")) { self->debug |= ParSHUM_CHECK_SCHUR_MEMORY; continue; } else if (!strcmp(argv[i], "--check_schur_symetry")) { self->debug |= ParSHUM_CHECK_SCHUR_SYMETRY; continue; } else if (!strcmp(argv[i], "--check_counters")) { self->debug |= ParSHUM_CHECK_COUNTERS; continue; } else if (!strcmp(argv[i], "--print_each_step")) { self->debug |= ParSHUM_DEBUG_VERBOSE_EACH_STEP; continue; } else if (!strcmp(argv[i], "--verbose_gossip_girl")) { self->debug |= ParSHUM_DEBUG_GOSSIP_GIRL; continue; } else if (!strcmp(argv[i], "--check_schur_doubles")) { self->debug |= ParSHUM_CHECK_SCHUR_DOUBLES; continue; } else if (!strcmp(argv[i], 
"--check_ParSHUM_with_plasma_perm")) { ParSHUM_solver_dealloc(self); exit(check_ParSHUM_with_plasma_perm(argc, argv)); } else if (!strcmp(argv[i], "--check_dense_with_ParSHUM_perm")) { ParSHUM_solver_dealloc(self); exit(check_dense_with_ParSHUM_perm(argc, argv)); continue; } else if (!strcmp(argv[i], "--group_run")) { run_args_start = ++i; i += 3; continue; } else if (!strcmp(argv[i], "--counters_size")) { self->size_counters = atoi(argv[++i]); continue; } else if (!strcmp(argv[i], "--max_dense_schur")) { int tmp = atoi(argv[++i]); if (tmp < 1) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "max_dense_schur should be at least 1"); self->exe_parms->max_dense_schur = tmp; continue; } else if (!strcmp(argv[i], "--singeltons_relaxation")) { self->exe_parms->singeltons_relaxation = atof(argv[++i]); } else if (!strcmp(argv[i], "--luby_algorithm")) { self->exe_parms->luby_algo = 1; } else if (!strcmp(argv[i], "--trace")) { self->exe_parms->trace = 1; } else if (!strcmp(argv[i], "--help")) { int j = 0; while( usageStrign[j] != NULL) printf("%s\n", usageStrign[j++]); exit(0); } else { if (exit_on_notFound) { char mess[2048]; snprintf(mess, 2048, "unrecognized option \"%s\" ", argv[i]); int j = 0; while( usageStrign[j] != NULL) printf("%s\n", usageStrign[j++]); ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, mess); } } } if (run_args_start) { ParSHUM_parm_type type; if ( !strcmp(argv[run_args_start], "value_tol") ) { type = ParSHUM_value_tol; double init = atof(argv[++run_args_start]), inc = atof(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else if ( !strcmp(argv[run_args_start], "marko_tol") ) { type = ParSHUM_marko_tol; double init = atof(argv[++run_args_start]), inc = atof(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else if ( 
!strcmp(argv[run_args_start], "schur_density") ) { type = ParSHUM_schur_density; double init = atof(argv[++run_args_start]), inc = atof(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else if ( !strcmp(argv[run_args_start], "nb_candidates") ) { type = ParSHUM_nb_candidates; int init = atoi(argv[++run_args_start]), inc = atoi(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else if ( !strcmp(argv[run_args_start], "min_pivots") ) { type = ParSHUM_min_pivots; int init = atoi(argv[++run_args_start]), inc = atoi(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else if ( !strcmp(argv[run_args_start], "nb_threads") ) { type = ParSHUM_nb_threads; int init = atoi(argv[++run_args_start]), inc = atoi(argv[++run_args_start]); int nb_steps = atoi(argv[++run_args_start]); exit(ParSHUM_solver_run_group(self, type, (void *) &init, nb_steps, (void *) &inc)); } else { int j = 0; while( usageStrign[j] != NULL) printf("%s\n", usageStrign[j++]); ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "for the group run, unrecognized type of argument is given" ); } } } void update_exe_parms(ParSHUM_exe_parms parms, ParSHUM_parm_type type, void *init_val, int step, void *val, void *inc) { double *Dinit, *Dinc, *Dval; int *Iinit, *Iinc, *Ival; switch (type) { case (ParSHUM_value_tol) : Dinit = (double *) init_val; Dinc = (double *) inc; Dval = (double *) val; *Dval = *Dinit * pow(*Dinc, (double) step); parms->value_tol = *Dval; break; case (ParSHUM_marko_tol) : Dinit = (double *) init_val; Dinc = (double *) inc; Dval = (double *) val; *Dval = *Dinit + *Dinc * step; parms->marko_tol = *Dval; break; case (ParSHUM_schur_density) : Dinit = (double *) init_val; Dinc = (double 
*) inc; Dval = (double *) val; *Dval = *Dinit + *Dinc * step; parms->density_tolerance = *Dval; break; case (ParSHUM_nb_candidates) : Iinit = (int *) init_val, Iinc = (int *) inc; Ival = (int *) val; *Ival = *Iinit + *Iinc * step; parms->nb_candidates_per_block = *Ival; break; case (ParSHUM_min_pivots) : Iinit = (int *) init_val, Iinc = (int *) inc; Ival = (int *) val; *Ival = *Iinit + *Iinc * step; parms->min_pivot_per_steps = *Ival; break; case (ParSHUM_nb_threads) : Iinit = (int *) init_val, Iinc = (int *) inc; Ival = (int *) val; *Ival = *Iinit + *Iinc * step; parms->nb_threads = *Ival; break; default : ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "unrecognized type of exe_parms"); } } char * get_outfile_prefix(ParSHUM_exe_parms exe_parms, ParSHUM_parm_type type) { char *self = calloc(PATH_LENGTH, sizeof(*self)); size_t length = 0; *self = '\0'; if (exe_parms->matrix_file) snprintf(self, 2058, "%s", basename(exe_parms->matrix_file)); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_value_tol) snprintf(self + length, PATH_LENGTH - length,"_MULTIValTol"); else snprintf(self + length, PATH_LENGTH - length,"_%fValTol", exe_parms->value_tol); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_marko_tol) snprintf(self + length, PATH_LENGTH - length,"_MULTIMarkoTol"); else snprintf(self + length, PATH_LENGTH - length,"_%fMarkoTol", exe_parms->marko_tol); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_nb_threads) snprintf(self + length, PATH_LENGTH - length,"_MULTIthreads"); else snprintf(self + length, PATH_LENGTH - length,"_%dthreads", exe_parms->nb_threads); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_nb_candidates) snprintf(self + length, PATH_LENGTH - length,"_MULTIcandidates"); else snprintf(self + length, PATH_LENGTH - length,"_%dcandidates", exe_parms->nb_candidates_per_block); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_schur_density) snprintf(self + length, 
PATH_LENGTH - length,"_MULTIdensityTol"); else snprintf(self + length, PATH_LENGTH - length,"_%fdensityTol", exe_parms->density_tolerance); length = strnlen(self, PATH_LENGTH - length); if (type == ParSHUM_min_pivots) snprintf(self + length, PATH_LENGTH - length,"_MULTIminPivots"); else snprintf(self + length, PATH_LENGTH - length,"_%dminPivots", exe_parms->min_pivot_per_steps); length = strnlen(self, PATH_LENGTH - length); self[length] = '\0'; return self; } int ParSHUM_solver_run_group(ParSHUM_solver solver, ParSHUM_parm_type type, void *init_val, int nb_steps, void *inc) { FILE *file; int i; ParSHUM_matrix A = ParSHUM_matrix_create(); ParSHUM_exe_parms exe_parms = solver->exe_parms; char *file_ext = strrchr(exe_parms->matrix_file, '.'); ParSHUM_vector X, rhs; char *output_runs_file = get_outfile_prefix(exe_parms, type); char filename[PATH_LENGTH]; double current_val; ParSHUM_verbose_create_dirs(solver->verbose->parms->output_dir); snprintf(filename, PATH_LENGTH, "%s/data/MULTI_%s_raw.dat", solver->verbose->parms->output_dir, output_runs_file); file = fopen(filename, "w+"); if (!strcmp(file_ext, ".mtl")) ParSHUM_read_mtl_file(A, exe_parms->matrix_file); #ifdef HAVE_SPRAL else if (!strcmp(file_ext, ".rb")) ParSHUM_read_rutherford_boeing(A, exe_parms->matrix_file); #endif else ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"unrecognized file type format"); solver->A = A; X = ParSHUM_vector_create(A->n); rhs = ParSHUM_vector_create(A->n); ParSHUM_vector_read_file(X, solver->exe_parms->RHS_file); ParSHUM_verbose_print_parms_raw(exe_parms, type, file); for( i = 0; i < nb_steps; i++) { ParSHUM_solver run = ParSHUM_solver_create(); ParSHUM_matrix matrix = ParSHUM_matrix_create(); ParSHUM_exe_parms run_exe_parms = malloc(sizeof(*run_exe_parms)); *run_exe_parms = *exe_parms; ParSHUM_matrix_copy(A, matrix); free(run->exe_parms); run->A = matrix; run->exe_parms = run->verbose->exe_parms = run_exe_parms; free(run->verbose->parms->output_dir); 
run->verbose->parms->output_dir = solver->verbose->parms->output_dir; run->verbose->parms->user_out_dir = 1; update_exe_parms(run->exe_parms, type, init_val, i, (void *) &current_val, inc); ParSHUM_solver_init(run); ParSHUM_vector_copy(X, rhs); ParSHUM_solver_factorize(run); ParSHUM_solver_solve(run, rhs); ParSHUM_solver_compute_norms(run, rhs, X); ParSHUM_solver_finalize(run); ParSHUM_verbose_print_group_run(run->verbose, type, (void *) &current_val, i, file); ParSHUM_solver_destroy(run); } fclose(file); ParSHUM_vector_destroy(X); ParSHUM_vector_destroy(rhs); ParSHUM_matrix_destroy(A); free(output_runs_file); ParSHUM_solver_dealloc(solver); return 0; } /* TODO: FOR NOW WE ASUME THAT N == M */ void ParSHUM_solver_alloc_counters(ParSHUM_solver solver, int **col_count, int **row_count) { int i, total; ParSHUM_counters counter = NULL; pthread_mutex_lock(&solver->counters_lock); for( i = 0; i < solver->nb_counters; i++) if ( solver->counters[i]->nb_used_counters < solver->size_counters) { counter = solver->counters[i]; break; } if (!counter) { solver->nb_counters++; int new = solver->nb_counters - 1; solver->counters = realloc(solver->counters, solver->nb_counters * sizeof(*solver->counters)); solver->counters[new] = calloc(1, sizeof(**solver->counters)); solver->counters[new]->array = calloc((size_t) solver->size_counters * 2 * solver->A->n, sizeof(*solver->counters[new]->array)); solver->counters[new]->used_counters = calloc((size_t) solver->size_counters, sizeof(*solver->counters[new]->used_counters)); counter = solver->counters[new]; } total = solver->size_counters; for(i = 0; i < total; i++) if(!counter->used_counters[i]) { *col_count = &counter->array[i * solver->A->n * 2 ]; *row_count = &counter->array[i * solver->A->n * 2 + solver->A->n]; counter->nb_used_counters++; counter->used_counters[i] = 1; pthread_mutex_unlock(&solver->counters_lock); return; } ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"this should not happened!"); } void 
/* Returns a (col_count,row_count) pair obtained from
   ParSHUM_solver_alloc_counters back to its slab.  The owning slab is found
   by pointer arithmetic: col_count must point inside some slab's array.
   Thread-safe via counters_lock. */
ParSHUM_solver_dealloc_counters(ParSHUM_solver solver, int *col_count, int *row_count)
{
  pthread_mutex_lock(&solver->counters_lock);
  int i;
  ParSHUM_counters counter = NULL;
  long diff;

  for( i = 0; i < solver->nb_counters; i++)
    {
      diff = col_count - solver->counters[i]->array;
      if ( diff >= 0 && diff < solver->A->n * 2 * solver->size_counters){
	counter = solver->counters[i];
	break;
      }
    }
  if (!counter)
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"puffff");

  /* diff / (2n) recovers the slot index inside the slab. */
  counter->used_counters[ diff / ( solver->A->n * 2) ] = 0;
  counter->nb_used_counters--;
  pthread_mutex_unlock(&solver->counters_lock);
}

/* Allocates every internal working structure of the solver once A is known:
   the Schur complement S (copy of A), diagonal D, L and U factors, the four
   permutation arrays, pivot bookkeeping, the Luby structures, per-thread
   workspaces and RNG seeds, and the counters mutex.
   NOTE(review): none of the malloc/calloc results are checked. */
void
ParSHUM_solver_alloc_internal(ParSHUM_solver self)
{
  double total_extra_space = 1 + self->exe_parms->extra_space_inbetween + self->exe_parms->extra_space;
  int n = self->A->n, m = self->A->m, i;
  /* TOCORRECT */
  /* int needed_pivots = n < m ? n : m; */
  /* Columns reserved as border-block (BB) columns never pivot; the last
     column is also excluded (see the same formula in factorize). */
  int needed_pivots = n - self->BB_cols - 1;
  int larger_size = n > m ? n : m;
  /* needed_pivots -= self->BB_cols; */

  self->S = ParSHUM_schur_matrix_create();
  self->D = ParSHUM_matrix_create();

  if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL))
    ParSHUM_matrix_print(self->A, "A on input");
  ParSHUM_schur_matrix_allocate(self->S, n, m, self->A->nnz, self->debug, self->verbose,
				self->exe_parms->nb_threads, self->exe_parms->extra_space,
				self->exe_parms->extra_space_inbetween);
  ParSHUM_schur_matrix_copy(self->A, self->S, self->exe_parms->value_tol);
  if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL))
    ParSHUM_schur_matrix_print(self->S, "S on input");

  ParSHUM_matrix_allocate(self->D, needed_pivots, 0, 0, 1.0, ParSHUM_Diag_matrix);
  self->L = ParSHUM_L_matrix_create(needed_pivots);
  self->U = ParSHUM_U_matrix_create(self->A, total_extra_space);

  /* Row/col permutations and their inverses, all initialised to "unused". */
  self->row_perm      = malloc((size_t) m * sizeof(*self->row_perm));
  self->invr_row_perm = malloc((size_t) m * sizeof(*self->invr_row_perm));
  self->col_perm      = malloc((size_t) n * sizeof(*self->col_perm));
  self->invr_col_perm = malloc((size_t) n * sizeof(*self->invr_col_perm));
  int_array_memset(self->row_perm,      ParSHUM_UNUSED_PIVOT, m);
  int_array_memset(self->invr_row_perm, ParSHUM_UNUSED_PIVOT, m);
  int_array_memset(self->col_perm,      ParSHUM_UNUSED_PIVOT, n);
  int_array_memset(self->invr_col_perm, ParSHUM_UNUSED_PIVOT, n);

  /* Pre-seed the sliding window of per-step pivot counts with huge values so
     the "too few pivots lately" stop test cannot fire on the first steps. */
  self->previous_pivots = malloc((size_t) self->exe_parms->nb_previous_pivots * sizeof(*self->previous_pivots));
  int_array_memset(self->previous_pivots, INT_MAX / self->exe_parms->nb_previous_pivots,
		   self->exe_parms->nb_previous_pivots);

  /* TODO: check if this is correct for the TP algo */
  self->candidates = calloc(1, sizeof(*self->candidates));
  self->candidates->row        = malloc((size_t) n * sizeof(*self->candidates->row));
  self->candidates->marko      = malloc((size_t) n * sizeof(*self->candidates->marko));
  self->candidates->best_marko = malloc((size_t) self->exe_parms->nb_threads * sizeof(*self->candidates->best_marko));

  self->counters = calloc(1, sizeof(*self->counters));
  *self->counters = calloc(1, sizeof(**self->counters));
  self->random_col = create_randomize(n);

  /* TODO: check if this is correct for the TP algo */
  (*self->counters)->array = calloc((size_t) self->size_counters * 2 * n, sizeof(*self->counters[0]->array));
  (*self->counters)->used_counters = calloc((size_t) self->size_counters, sizeof(*self->counters[0]->used_counters));
  self->nb_counters = 1;

  /* U/L structure arrays start at ~1% of the dimension (min 1) and are grown
     by doubling in ParSHUM_solver_get_pivots. */
  self->allocated_U_struct = n / 100;
  self->allocated_U_struct = self->allocated_U_struct ? self->allocated_U_struct : 1;
  self->U_struct = calloc((size_t) self->allocated_U_struct, sizeof(*self->U_struct));
  self->allocated_L_struct = m / 100;
  self->allocated_L_struct = self->allocated_L_struct ? self->allocated_L_struct : 1;
  self->L_struct = calloc((size_t) self->allocated_L_struct, sizeof(*self->L_struct));

  self->Luby = ParSHUM_Luby_create(n - self->BB_cols);

  /* Identity lists of candidate columns (non-BB only) and rows. */
  self->cols = malloc((size_t) (n - self->BB_cols) * sizeof(*self->cols));
  for( i = 0; i < n - self->BB_cols; i++)
    self->cols[i] = i;
  self->rows = malloc((size_t) m * sizeof(*self->rows));
  for( i = 0; i < m; i++)
    self->rows[i] = i;

  self->distributions = malloc((size_t) (self->exe_parms->nb_threads + 1) * sizeof(*self->distributions));

  /* srand(time(NULL)); */
  self->seeds = malloc((size_t) self->exe_parms->nb_threads * sizeof(*self->seeds));
  for(i = 0; i < self->exe_parms->nb_threads; i++)
    /* TODO: Put an argument "determenistic" and call srand before if activated */
    self->seeds[i] = rand();

  /* One scratch buffer per thread, big enough for larger_size ints plus
     larger_size doubles (reused by several phases under different casts). */
  self->workspace = malloc((size_t) self->exe_parms->nb_threads * sizeof(*self->workspace));
  for(i = 0; i < self->exe_parms->nb_threads; i++)
    self->workspace[i] = malloc((size_t) larger_size * (sizeof(int) + sizeof(double)));

  self->logical_cols = calloc((size_t) n, sizeof(*self->logical_cols));
  self->logical_rows = calloc((size_t) m, sizeof(*self->logical_rows));

  pthread_mutex_init(&self->counters_lock, NULL);
}

/* Finalises solver setup after A is loaded: fills the verbose statistics,
   optionally starts tracing / PLASMA, allocates internals (unless the
   dense-debug mode skips them), and spins up an empty OpenMP region —
   presumably to warm up the thread team before the timed phases. */
void
ParSHUM_solver_init(ParSHUM_solver self)
{
  self->verbose->n = self->A->n;
  self->verbose->m = self->A->m;
  self->verbose->nnz_input = self->verbose->nnz_final = self->A->nnz;
  self->verbose->Luby = self->exe_parms->luby_algo;
  self->verbose->parms->outfiles_prefix = get_outfile_prefix(self->exe_parms, ParSHUM_parm_none);

  if (self->exe_parms->trace)
    self->verbose->paje = ParSHUM_paje_create(self->exe_parms->nb_threads);

#ifdef USE_PLASMA
  if (!is_plasma_init) {
    plasma_init(1);
    is_plasma_init = 1;
  }
#endif

  if ( !(self->debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM) )
    ParSHUM_solver_alloc_internal(self);

  if (self->verbose->parms->verbosity > 1)
    ParSHUM_verbose_create_dirs(self->verbose->parms->output_dir);

#pragma omp parallel num_threads(self->exe_parms->nb_threads) default(none) //proc_bind(spread)
  {
    int me = omp_get_thread_num();
    me++;
  }
}

/* Loads the matrix named in exe_parms->matrix_file into self->A, dispatching
   on the file extension (.mtl always; .rb only when built with SPRAL). */
void
ParSHUM_solver_read_matrix(ParSHUM_solver self)
{
  char *file_ext = strrchr(self->exe_parms->matrix_file, '.');
  self->A = ParSHUM_matrix_create();

  if (!strcmp(file_ext, ".mtl"))
    ParSHUM_read_mtl_file(self->A, self->exe_parms->matrix_file);
#ifdef HAVE_SPRAL
  else if (!strcmp(file_ext, ".rb"))
    ParSHUM_read_rutherford_boeing(self->A, self->exe_parms->matrix_file);
#endif
  else
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"unsupported matrix file");
}

/* Commits the pivot set found by the non-Luby search: records each cell of
   'set' in the four permutation arrays, then rebuilds the U_struct / L_struct
   lists of columns/rows touched by the new pivots (counts relative to
   set->base), doubling the arrays as needed. */
void
ParSHUM_solver_get_pivots(ParSHUM_solver self, ParSHUM_pivot_set set)
{
  ParSHUM_pivot_cell cells = set->cells;
  int n = self->A->n, i, k, nnz ;
  int new_pivots = 0,
    old_pivots = self->found_pivots + self->nb_col_singletons + self->nb_row_singletons;
  int *row_perms = self->row_perm;
  int *col_perms = self->col_perm;
  int *invr_row_perms = self->invr_row_perm;
  int *invr_col_perms = self->invr_col_perm;

  /* Append the new pivots after all previously committed ones. */
  while (cells)
    {
      int current_pivot = new_pivots + old_pivots;
      row_perms[current_pivot] = cells->row;
      col_perms[current_pivot] = cells->col;
      invr_row_perms[cells->row] = current_pivot;
      invr_col_perms[cells->col] = current_pivot;
      new_pivots++;
      cells = cells->next;
    }

  /* Collect the columns whose count reaches set->base into U_struct. */
  for ( i = 0, k=0, nnz = 0; i < n; i++)
    {
      if (set->cols_count[i] < set->base)
	continue;
      if (self->invr_col_perm[i] != ParSHUM_UNUSED_PIVOT) {
	if (set->cols_count[i] > set->base) {
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "col is supposed to be a pivot but is larger then base ");
	} else {
	  continue;
	}
      }
      self->U_struct[k].col = i;
      self->U_struct[k].nb_elem = set->cols_count[i] - set->base + 1;
      nnz += self->U_struct[k].nb_elem;

      if( ++k >= self->allocated_U_struct) {
	self->allocated_U_struct *= 2;
	self->U_struct = realloc(self->U_struct, (size_t) self->allocated_U_struct * sizeof(*self->U_struct));
      }
    }
  self->n_U_structs = k;
  self->nnz_U_structs = nnz;

  /* Same collection for the rows, into L_struct. */
  for ( i = 0, k=0, nnz = 0; i < n; i++)
    {
      if (set->rows_count[i] < set->base)
	continue;
      if (self->invr_row_perm[i] != ParSHUM_UNUSED_PIVOT) {
	if (set->rows_count[i] >
	    set->base) {
	  ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "row is supposed to be a pivot but is larger then base ");
	} else {
	  continue;
	}
      }
      self->L_struct[k].col = i;
      self->L_struct[k].nb_elem = set->rows_count[i] - set->base + 1;
      nnz += self->L_struct[k].nb_elem;

      if( ++k >= self->allocated_L_struct) {
	self->allocated_L_struct *= 2;
	self->L_struct = realloc(self->L_struct, (size_t) self->allocated_L_struct * sizeof(*self->L_struct));
      }
    }
  self->n_L_structs = k;
  self->nnz_L_structs = nnz;

  self->rows_count = set->rows_count;
  self->cols_count = set->cols_count;
  self->found_pivots += new_pivots;
  ParSHUM_verbose_update_pivots(self->verbose, new_pivots);
}

/* Debug cross-check (ParSHUM_CHECK_PIVOTS): recomputes, from the Schur matrix
   structure, how many still-unpivoted columns/rows are touched by the pivots
   committed this step, and fatal-errors if the counts disagree with
   n_U_structs / n_L_structs. */
void
ParSHUM_check_logical_sum(ParSHUM_solver self, int new_Luby_pivots)
{
  ParSHUM_schur_matrix S = self->S ;
  int n = S->n, m = S->m, i, pivot;
  int *logical_rows = calloc(m, sizeof(*logical_rows));
  /* NOTE(review): sizeof(*logical_rows) used for logical_cols too — same
     element type (int), so harmless, but presumably a copy-paste slip. */
  int *logical_cols = calloc(n, sizeof(*logical_rows));
  int *invr_row_perms = self->invr_row_perm;
  int *invr_col_perms = self->invr_col_perm;
  int *row_perms = self->row_perm;
  int *col_perms = self->col_perm;
  int start_pivots = self->done_pivots;
  int end_pivots = self->done_pivots + self->nb_row_singletons + self->nb_col_singletons + new_Luby_pivots;
  int nb_logical_rows = 0;
  int nb_logical_cols = 0;

  for( pivot = start_pivots; pivot < end_pivots; pivot++)
    {
      int col_pivot = col_perms[pivot];
      CSC_struct *CSC = &S->CSC[col_pivot];
      int *rows   = CSC->row;
      int nb_elem = CSC->nb_elem;
      for ( i = 0; i < nb_elem; i++)
	if (invr_row_perms[rows[i]] == ParSHUM_UNUSED_PIVOT)
	  logical_rows[rows[i]] = 1;

      int row_pivot = row_perms[pivot];
      CSR_struct *CSR = &S->CSR[row_pivot];
      int *cols = CSR->col;
      nb_elem = CSR->nb_elem;
      for(i = 0; i < nb_elem; i++)
	if (invr_col_perms[cols[i]] == ParSHUM_UNUSED_PIVOT)
	  logical_cols[cols[i]] = 1;
    }

  for ( i = 0; i < n; i++)
    if (logical_cols[i])
      nb_logical_cols++;
  for ( i = 0; i < m; i++)
    if (logical_rows[i])
      nb_logical_rows++;

  if (nb_logical_cols != self->n_U_structs)
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "n_U_struct not correct");
  if (nb_logical_rows != self->n_L_structs)
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "n_L_struct not correct");

  free(logical_rows);
  free(logical_cols);
}

/* Commits the pivots found by the Luby-style parallel search.  In one OpenMP
   region (ordered by explicit barriers) it: (1) fills the inverse permutation
   arrays for the new pivots, (2) stamps the rows/cols touched by pivotal
   cols/rows with 'base' in the logical arrays, (3) sequentially handles the
   column singletons (entries landing in the pivotal square block are flagged
   by offsetting col_perm by +n, to end up in U), then (4) gathers the
   still-unpivoted touched cols/rows into col_perm/row_perm after the pivots,
   building n_U_structs / n_L_structs via per-thread prefix sums. */
void
ParSHUM_solver_get_Luby_pivots(ParSHUM_solver self, ParSHUM_Luby Luby, int new_Luby_pivots)
{
  ParSHUM_schur_matrix S = self->S;
  ParSHUM_verbose verbose = self->verbose;
  int n = S->n, nb_cols = S->n - self->done_pivots - self->BB_cols,
    nb_rows = S->m - self->done_pivots;
  int done_pivots = self->done_pivots;
  int new_pivots = new_Luby_pivots + self->nb_col_singletons + self->nb_row_singletons;
  int all_pivots = new_pivots + done_pivots;
  int i, base = self->step + 1;
  int nb_threads = self->exe_parms->nb_threads;
  int nb_BB_cols = self->BB_cols;
  /* Per-thread [start,end) partitions of the new pivots, the candidate
     columns and the candidate rows. */
  int distribution_perms[nb_threads+1], distribution_n[nb_threads+1], distribution_m[nb_threads+1];
  int row_sizes[nb_threads], col_sizes[nb_threads];
  int *logical_cols = self->logical_cols;
  int *logical_rows = self->logical_rows;
  int *row_perms = self->row_perm;
  int *col_perms = self->col_perm;
  int *invr_row_perms = self->invr_row_perm;
  int *invr_col_perms = self->invr_col_perm;
  int *cols = self->cols;
  int *rows = self->rows;

  self->previous_step_pivots = new_pivots;
  self->n_U_structs = 0;
  self->nnz_U_structs = 0;
  self->n_L_structs = 0;
  self->nnz_L_structs = 0;

  *distribution_perms = done_pivots + self->nb_col_singletons + self->nb_row_singletons;
  for(i = 1; i < nb_threads; i++)
    distribution_perms[i] = *distribution_perms + (new_Luby_pivots / nb_threads) * i;
  distribution_perms[nb_threads] = all_pivots;

  *distribution_n = 0;
  for(i = 1; i < nb_threads; i++)
    distribution_n[i] = (nb_cols / nb_threads) * i;
  distribution_n[nb_threads] = nb_cols;

  *distribution_m = 0;
  for(i = 1; i < nb_threads; i++)
    distribution_m[i] = (nb_rows / nb_threads) * i;
  distribution_m[nb_threads] = nb_rows;

#pragma omp parallel num_threads(nb_threads) shared(distribution_perms, distribution_n, distribution_m, nb_cols, col_sizes, row_sizes, n, nb_BB_cols, verbose) firstprivate(self, S, base, invr_col_perms, invr_row_perms, col_perms, row_perms, logical_cols, logical_rows, nb_threads, cols, rows, all_pivots) default(none) //proc_bind(spread)
  {
    ParSHUM_verbose_trace_start_event(verbose, ParSHUM_GETTING_PIVOTS);
    int i, j;
    int me    = omp_get_thread_num();
    int start = distribution_perms[me] ;
    int end   = distribution_perms[me+1];
    int *my_col_perms = (int *) self->workspace[me];
    int *my_row_perms = &my_col_perms[nb_cols];
    int my_nb_cols = 0, my_nb_rows = 0;

    /* Updte the invr_*_perms arrays */
    for( i = start; i < end; i++)
      {
	invr_col_perms[col_perms[i]] = i;
	invr_row_perms[row_perms[i]] = i;
      }
#pragma omp barrier

    /* Updte the logical arrays with the non-singelton pivots */
    for ( i = start; i < end; i++)
      {
	CSC_struct *CSC = &S->CSC[col_perms[i]];
	int *rows   = CSC->row;
	int nb_elem = CSC->nb_elem;
	for ( j = 0; j < nb_elem; j++) {
	  logical_rows[rows[j]] = base;
	}

	CSR_struct *CSR = &S->CSR[row_perms[i]];
	int *cols = CSR->col;
	nb_elem = CSR->nb_elem;
	for ( j = 0; j < nb_elem; j++) {
	  logical_cols[cols[j]] = base;
	}
      }
#pragma omp barrier

#pragma omp single
    {
      /* Sequential updte the logical arrays with the col_singeltons. This is
	 done seq beacuse the update in parallel will need to use atomics.
	 We need to treat the col singeltons separately, because we need to
	 indetify entries that are in the pivotal square matrix in order to
	 put them in U.
      */
      start = self->done_pivots + self->nb_row_singletons;
      end   = self->done_pivots + self->nb_row_singletons + self->nb_col_singletons;
      for ( i = start; i < end; i++)
	{
	  CSR_struct *CSR = &S->CSR[row_perms[i]];
	  int *cols = CSR->col;
	  int nb_elem = CSR->nb_elem;
	  for ( j = 0; j < nb_elem; j++) {
	    int col  = cols[j];
	    int perm = invr_col_perms[col];
	    /* col already chosen as a later pivot: offset its col_perm entry
	       by +n to mark it as belonging to the pivotal square block. */
	    if (perm != ParSHUM_UNUSED_PIVOT && perm > end)
	      if (col_perms[perm] < n)
		col_perms[perm] += n;
	    logical_cols[col] = base;
	  }
	}
      /* Re-partition: the row singletons still need their row stamping. */
      *distribution_perms = self->done_pivots;
      for(i = 1; i < nb_threads; i++)
	distribution_perms[i] = *distribution_perms + (self->nb_row_singletons / nb_threads) * i;
      distribution_perms[nb_threads] = *distribution_perms + self->nb_row_singletons;
    }
#pragma omp barrier

    start = distribution_perms[me] ;
    end   = distribution_perms[me+1];
    for ( i = start; i < end; i++)
      {
	CSC_struct *CSC = &S->CSC[col_perms[i]];
	int *rows   = CSC->row;
	int nb_elem = CSC->nb_elem;
	for ( j = 0; j < nb_elem; j++) {
	  logical_rows[rows[j]] = base;
	}
      }
#pragma omp barrier

    /* Gather the unpivoted cols/rows stamped this step into thread-local
       buffers. */
    start = distribution_n[me];
    end   = distribution_n[me+1];
    for( i = start; i < end; i++)
      {
	int col = cols[i];
	if(logical_cols[col] == base && invr_col_perms[col] == ParSHUM_UNUSED_PIVOT)
	  my_col_perms[my_nb_cols++] = col;
      }

    start = distribution_m[me];
    end   = distribution_m[me+1];
    for( i = start; i < end; i++)
      {
	int row = rows[i];
	if(logical_rows[row] == base && invr_row_perms[row] == ParSHUM_UNUSED_PIVOT)
	  my_row_perms[my_nb_rows++] = row;
      }
    col_sizes[me] = my_nb_cols;
    row_sizes[me] = my_nb_rows;
#pragma omp barrier

#pragma omp single
    {
      /* Prefix sums give each thread its copy offset. */
      for (i = 1; i < nb_threads; i++)
	{
	  col_sizes[i] += col_sizes[i-1];
	  row_sizes[i] += row_sizes[i-1];
	}
      self->n_U_structs = col_sizes[nb_threads-1];
      self->n_L_structs = row_sizes[nb_threads-1];
    }
#pragma omp barrier

    if (me) {
      memcpy(&col_perms[all_pivots + col_sizes[me-1]], my_col_perms,
	     (size_t) (col_sizes[me] - col_sizes[me-1]) * sizeof(*col_perms));
      memcpy(&row_perms[all_pivots + row_sizes[me-1]], my_row_perms,
	     (size_t) (row_sizes[me] - row_sizes[me-1]) * sizeof(*row_perms));
    } else {
      memcpy(&col_perms[all_pivots], my_col_perms, (size_t) col_sizes[me] * sizeof(*col_perms));
      memcpy(&row_perms[all_pivots], my_row_perms, (size_t) row_sizes[me] * sizeof(*row_perms));
      /* Thread 0 also appends the touched BB columns. */
      for( i = n - nb_BB_cols; i < n; i++)
	if (logical_cols[i] == base && invr_col_perms[i] == ParSHUM_UNUSED_PIVOT)
	  col_perms[all_pivots + self->n_U_structs++] = i;
    }
    ParSHUM_verbose_trace_stop_event(verbose);
  }

  self->found_pivots = all_pivots;
  ParSHUM_verbose_update_pivots(self->verbose, new_pivots);
}

/* Debug helper (ParSHUM_CHECK_COUNTERS): validates every counter slab. */
void
ParSHUM_solver_check_counters(ParSHUM_solver self)
{
  int i;
  for (i = 0; i < self->nb_counters; i++)
    ParSHUM_check_counters(i, self->counters[i]->array, self->counters[i]->used_counters,
			   self->done_pivots + 1, self->size_counters, self->A->n);
}

/* One pivot-search step: first extracts row/column singletons, then either
   runs the parallel Luby-based search (eligible-entry scan, score assignment,
   two-pass conflict elimination) or the original candidate-list /
   merge-pivot-sets algorithm, and commits the result. */
void
ParSHUM_solver_find_pivot_set(ParSHUM_solver self)
{
  ParSHUM_pivot_list list;
  ParSHUM_exe_parms exe_parms = self->exe_parms;
  ParSHUM_verbose_per_step step = ParSHUM_verbose_get_step(self->verbose);
  ParSHUM_verbose verbose = self->verbose;
  int nb_threads = self->exe_parms->nb_threads;
  /* TOCORRECT */
  /* int needed_pivots = self->A->n < self->A->m ?
     self->A->n : self->A->m; */
  int needed_pivots = self->A->n - self->BB_cols - 1;
  needed_pivots -= self->done_pivots;
  int new_pivots = 0;

  ParSHUM_verbose_start_timing(&step->timing_extracting_candidates);
  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_GET_SINGELTONS);
  /* Singleton rows/cols are pivots for free; they are placed directly in the
     permutation arrays by this call. */
  ParSHUM_schur_get_singletons(self->S, self->done_pivots, self->previous_step_pivots,
			       self->exe_parms->value_tol * self->exe_parms->singeltons_relaxation,
			       &self->nb_col_singletons, &self->nb_row_singletons,
			       self->cols, self->rows, self->distributions, self->BB_cols,
			       self->col_perm, self->row_perm, self->invr_col_perm,
			       self->invr_row_perm, self->workspace);
  /* We should check if we have found more then enought singletons */
  needed_pivots -= self->nb_col_singletons + self->nb_row_singletons;
  ParSHUM_verbose_trace_stop_event(verbose);

  if (exe_parms->luby_algo)
    {
      if (needed_pivots > 0) {
	int nb_cols = self->S->n - self->done_pivots - self->BB_cols;
	int *distributions = self->distributions;
	long best_marko, best_markos[nb_threads];
	int candidates[nb_threads];
	int i;

	*distributions = 0;
	for (i = 1; i < nb_threads; i++)
	  distributions[i] = (nb_cols / nb_threads) * i;
	distributions[nb_threads] = nb_cols;

	/* Phase 1: each thread scans its column slice for eligible entries
	   and reports its best Markowitz cost; a single thread reduces to a
	   global threshold (best * marko_tol). */
#pragma omp parallel num_threads(nb_threads) shared(self, nb_cols, best_marko, best_markos, distributions, candidates, step) firstprivate(verbose, exe_parms, i, nb_threads) default(none) //proc_bind(spread)
	{
	  int me = omp_get_thread_num();
	  int *my_col_perms = (int *) self->workspace[me];
	  int *my_row_perms = &my_col_perms[nb_cols];
	  int *global_col_perms = (int *) self->workspace[0];
	  int *global_row_perms = &global_col_perms[nb_cols];
	  int max_col_length = self->S->nnz /nb_cols ;

	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_GET_ELIGEBLE);
	  best_markos[me] = ParSHUM_Luby_get_eligible(self->S, self->Luby, exe_parms->value_tol,
						      self->invr_col_perm, self->invr_row_perm,
						      self->cols, distributions[me],
						      distributions[me + 1], max_col_length);
	  ParSHUM_verbose_trace_stop_event(verbose);
#pragma omp barrier
#pragma omp single
	  {
	    ParSHUM_verbose_trace_start_event(verbose, ParSHUM_AUXILIARY);
	    best_marko = *best_markos;
	    for(i = 1; i < nb_threads; i++)
	      best_marko = best_marko > best_markos[i] ? best_markos[i] : best_marko;
	    best_marko = !best_marko ? 1 : best_marko;
	    best_marko *= exe_parms->marko_tol;
	    ParSHUM_verbose_trace_stop_event(verbose);
	  }
#pragma omp barrier

	  /* Phase 2: random scores for the entries below the threshold;
	     prefix-sum the per-thread candidate counts and gather the
	     candidates into workspace[0]. */
	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_ASSIGN_SCORE);
	  candidates[me] = ParSHUM_Luby_assign_score(self->Luby, self->S, best_marko, &self->seeds[me],
						     my_col_perms, my_row_perms,
						     self->cols, distributions[me], distributions[me + 1]);
	  ParSHUM_verbose_trace_stop_event(verbose);
#pragma omp barrier
	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_AUXILIARY);
#pragma omp single
	  {
	    for( i = 1; i < nb_threads; i++)
	      candidates[i] += candidates[i-1];
	    step->nb_candidates = candidates[nb_threads-1];
	  }
#pragma omp barrier
	  if (me) {
	    memcpy(&global_col_perms[candidates[me - 1]], my_col_perms,
		   (size_t) (candidates[me] - candidates[me-1]) * sizeof(*self->col_perm));
	    memcpy(&global_row_perms[candidates[me - 1]], my_row_perms,
		   (size_t) (candidates[me] - candidates[me-1]) * sizeof(*self->row_perm));
	  }
	  ParSHUM_verbose_trace_stop_event(verbose);
	}
	ParSHUM_verbose_stop_timing(&step->timing_extracting_candidates);

	ParSHUM_verbose_start_timing(&step->timing_merging_pivots);
	/* This restrictoin most be done for the recantular (overderminated)
	   case, so that we do not find too many pivots */
	if (step->nb_candidates > needed_pivots)
	  step->nb_candidates = needed_pivots;

	*distributions = 0;
	for (i = 1; i < nb_threads; i++)
	  distributions[i] = (step->nb_candidates / nb_threads) * i;
	distributions[nb_threads] = step->nb_candidates;

	/* Phase 3: Luby two-pass conflict resolution keeps an independent
	   pivot set; survivors are copied after the singletons in
	   col_perm/row_perm. */
#pragma omp parallel num_threads(nb_threads) shared(self, nb_cols, distributions, candidates) firstprivate(verbose, i, nb_threads) default(none) //proc_bind(spread)
	{
	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_FIRST_PASS);
	  int me = omp_get_thread_num();
	  int non_BB_cols = self->S->n - self->BB_cols;
	  int my_size = distributions[me + 1] - distributions[me];
	  int *tmp = (int *) self->workspace[0];
	  int *my_col_perms = &tmp[distributions[me]];
	  int *my_row_perms = &tmp[nb_cols + distributions[me]];
	  int *global_col_perms = &self->col_perm[self->done_pivots + self->nb_col_singletons + self->nb_row_singletons];
	  int *global_row_perms = &self->row_perm[self->done_pivots + self->nb_col_singletons + self->nb_row_singletons];

	  ParSHUM_Luby_first_pass(self->Luby, self->S, non_BB_cols, my_col_perms, my_row_perms, my_size);
	  ParSHUM_verbose_trace_stop_event(verbose);
#pragma omp barrier

	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_DISCARDING_PIVOTS);
	  candidates[me] = ParSHUM_Luby_second_pass(self->S, self->Luby, my_col_perms, my_row_perms, my_size);
	  ParSHUM_verbose_trace_stop_event(verbose);
#pragma omp barrier
	  ParSHUM_verbose_trace_start_event(verbose, ParSHUM_AUXILIARY);
#pragma omp single
	  {
	    self->Luby->chosen_base += 3;
	    for( i = 1; i < nb_threads; i++)
	      candidates[i] += candidates[i-1];
	  }
#pragma omp barrier
	  if (me) {
	    memcpy(&global_col_perms[candidates[me-1]], my_col_perms,
		   (size_t) (candidates[me] - candidates[me-1]) * sizeof(*my_col_perms));
	    memcpy(&global_row_perms[candidates[me-1]], my_row_perms,(size_t) (candidates[me] - candidates[me-1]) * sizeof(*self->row_perm));
	  } else {
	    memcpy(global_col_perms, my_col_perms, (size_t) candidates[me] * sizeof(*my_col_perms));
	    memcpy(global_row_perms, my_row_perms, (size_t) candidates[me] * sizeof(*my_row_perms));
	  }
	  ParSHUM_verbose_trace_stop_event(verbose);
	}
	new_pivots = candidates[nb_threads-1];
      } else { // needed_pivots
	ParSHUM_verbose_start_timing(&step->timing_merging_pivots);
      }
      ParSHUM_solver_get_Luby_pivots(self, self->Luby, new_pivots);
      if (self->debug & ParSHUM_CHECK_PIVOTS)
	ParSHUM_check_logical_sum(self, new_pivots);
      ParSHUM_verbose_stop_timing(&step->timing_merging_pivots);

      if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL)) {
	char mess[2048];
	snprintf(mess, 2048,"%d row singeltons, %d col_singeltons and %d luby pivots were found\ncol pemrs",
		 self->nb_row_singletons, self->nb_col_singletons, new_pivots);
	print_int_array(self->col_perm, self->A->n, mess);
	print_int_array(self->row_perm, self->A->m, "row_perms");
	/* NOTE(review): invr_col_perm is an n-sized array but is printed with
	   length A->m — presumably should be A->n; confirm. */
	print_int_array(self->invr_col_perm, self->A->m, "invr_col_perms");
	print_int_array(self->invr_row_perm, self->A->m, "invr_row_perms");
      }
    }
  else
    {
      /* Original (non-Luby) algorithm: build candidate pivot sets, merge
	 them into one independent set, and commit it. */
      if (self->debug & ParSHUM_CHECK_COUNTERS )
	ParSHUM_solver_check_counters(self);

      ParSHUM_verbose_start_timing(&step->timing_extracting_candidates);
      list = get_possible_pivots(self, self->S, self->random_col, self->candidates, nb_threads,
				 exe_parms->value_tol, exe_parms->marko_tol,
				 exe_parms->nb_candidates_per_block);
      ParSHUM_verbose_stop_timing(&step->timing_extracting_candidates);

      if( !list->nb_elem )
	ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "no possible pivot were found");

      ParSHUM_verbose_start_timing(&step->timing_merging_pivots);
      list = merge_pivot_sets(list, self->S, self);
      if (list->nb_elem > 1 )
	ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "not unique set");
      ParSHUM_solver_get_pivots(self, list->sets);

      if (self->debug & ParSHUM_CHECK_COUNTERS )
	ParSHUM_check_current_counters(self->S, &self->col_perm[self->done_pivots],
				       &self->row_perm[self->done_pivots],
				       self->found_pivots - self->done_pivots,
				       self->cols_count, self->rows_count, self->done_pivots + 1);
      if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL))
	print_pivot_list(list, "found pivots");
      ParSHUM_pivot_list_destroy(list, self);
      ParSHUM_verbose_stop_timing(&step->timing_merging_pivots);
    }
}

/* Applies the pivots committed this step: moves the pivotal rows/cols into
   L, D and U, updates the Schur complement S, advances done_pivots and the
   step counter, and runs the requested debug checks. */
void
ParSHUM_solver_update_matrix(ParSHUM_solver self)
{
  ParSHUM_L_matrix L = self->L;
  ParSHUM_matrix D = self->D;
  ParSHUM_U_matrix U = self->U;
  ParSHUM_schur_matrix S = self->S;
  int nb_pivots = self->found_pivots - self->done_pivots;
  ParSHUM_verbose_per_step step = ParSHUM_verbose_get_step(self->verbose);

  ParSHUM_verbose_start_timing(&step->timing_update_LD);
  ParSHUM_schur_matrix_update_LD(S, L, U, D, &self->row_perm[self->done_pivots],
				 &self->col_perm[self->done_pivots], nb_pivots,
				 self->invr_row_perm, self->nb_row_singletons,
				 self->nb_col_singletons, self->workspace);
  ParSHUM_verbose_stop_timing(&step->timing_update_LD);

  ParSHUM_verbose_start_timing(&step->timing_update_S);
  ParSHUM_schur_matrix_update_S(S, L, U, &self->col_perm[self->found_pivots], self->n_U_structs,
				&self->row_perm[self->found_pivots], self->n_L_structs,
				self->row_perm, self->invr_col_perm, self->invr_row_perm,
				nb_pivots, self->done_pivots, self->exe_parms->value_tol,
				self->workspace);
  ParSHUM_verbose_stop_timing(&step->timing_update_S);

  self->done_pivots = self->found_pivots;
  self->step++;

  if (self->debug & ParSHUM_CHECK_SCHUR_SYMETRY )
    ParSHUM_schur_matrix_check_symetry(self->S);
  if (self->debug & ParSHUM_CHECK_PIVOTS)
    ParSHUM_schur_matrix_check_pivots(self->S, self->row_perm, self->col_perm,
				      self->invr_row_perm, self->invr_col_perm,
				      self->done_pivots);
  if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL)) {
    ParSHUM_schur_matrix_print(S, "S after update");
    ParSHUM_L_matrix_print(L, "L after update");
    ParSHUM_matrix_print(D, "D after update");
    ParSHUM_U_matrix_print(U, "U after update");
  }
  if (self->debug & ParSHUM_CHECK_SCHUR_DOUBLES) {
    ParSHUM_schur_check_doubles(S);
  }
}

/* Decides whether sparse pivot hunting should continue.  Returns 0 (stop)
   when a dense-debug mode is active, when the remaining Schur complement is
   denser than density_tolerance, when the last nb_previous_pivots steps
   produced fewer than min_pivot_per_steps pivots in total, or when all
   needed pivots are done; records the stop reason in 'verbose'.  Fatal error
   if stopping would leave a dense Schur larger than max_dense_schur. */
int
ParSHUM_continue_pivot_search(ParSHUM_schur_matrix S, int nb_done_pivots, int nb_needed_pivots,
			      int *previous_pivots, int nb_previous_pivots,
			      double density_tolerance, int min_pivot_per_steps,
			      int max_dense_schur, int debug, ParSHUM_verbose verbose)
{
  long n_schur, m_schur, i, sum_pivots;
  int retval = 1;

  if ( debug & ParSHUM_CHECK_ParSHUM_W_PLASMA_PERM ||
       debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM ) {
    verbose->reason = ParSHUM_reason_because;
    return 0;
  }
  n_schur = (long) S->n - nb_done_pivots;
  m_schur = (long) S->m - nb_done_pivots;

  if ( S->nnz > (long) n_schur * m_schur * density_tolerance) {
    verbose->reason = ParSHUM_reason_density;
    retval = 0;
  }

  if (retval) {
    for( i = 0, sum_pivots = 0; i < nb_previous_pivots; i++)
      sum_pivots += previous_pivots[i];
    if (sum_pivots < min_pivot_per_steps) {
      verbose->reason = ParSHUM_reason_no_pivots;
      retval = 0;
    }
  }

  if (nb_done_pivots >= nb_needed_pivots && retval) {
    verbose->reason = ParSHUM_reason_no_pivots;
    retval = 0;
  }

  if ( !retval && max_dense_schur < (nb_needed_pivots - nb_done_pivots) ) {
    verbose->reason |= ParSHUM_reason_dense_too_large;
    ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "dense schur too large");
  }

  return retval;
}

/* Full factorization driver: runs the sparse pivot-search / update loop while
   ParSHUM_continue_pivot_search allows, then (continued below) converts the
   remaining Schur complement to dense storage and factorizes it. */
void
ParSHUM_solver_factorize(ParSHUM_solver self)
{
  ParSHUM_exe_parms exe_parms = self->exe_parms;
  ParSHUM_verbose verbose = self->verbose;
  int n = self->S->n, m = self->S->m;
  int needed_pivots = n < m ? n : m;
  int *previous_pivots    = self->previous_pivots;
  int nb_previous_pivots  = exe_parms->nb_previous_pivots;
  int nb_pivot_blocks = 0;
  /* TOCORRECT */
  /* needed_pivots -= self->BB_cols + 1; */
  /* if (n - self->BB_cols + 1 < needed_pivots) */
  needed_pivots = n - self->BB_cols - 1;

  if (self->debug & ParSHUM_CHECK_SCHUR_DOUBLES)
    ParSHUM_schur_check_doubles(self->S);

  ParSHUM_verbose_start_timing(&verbose->timing_facto);
  ParSHUM_verbose_start_timing(&verbose->timing_facto_sparse);
  while ( ParSHUM_continue_pivot_search(self->S, self->done_pivots, needed_pivots,
					previous_pivots, nb_previous_pivots,
					exe_parms->density_tolerance,
					exe_parms->min_pivot_per_steps,
					exe_parms->max_dense_schur,
					self->debug, verbose) )
    {
      ParSHUM_verbose_per_step step = ParSHUM_verbose_step_start(verbose);
      ParSHUM_verbose_start_timing(&step->timing_step);
      ParSHUM_verbose_start_timing(&step->timing_pivot_search);
      ParSHUM_solver_find_pivot_set(self);
      /* Circular buffer feeding the "too few pivots lately" stop test. */
      previous_pivots[nb_pivot_blocks++ % nb_previous_pivots] = self->found_pivots - self->done_pivots;
      ParSHUM_verbose_stop_timing(&step->timing_pivot_search);

      ParSHUM_verbose_start_timing(&step->timing_apply_perms);
      ParSHUM_solver_update_matrix(self);
      ParSHUM_verbose_stop_timing(&step->timing_apply_perms);
      ParSHUM_verbose_stop_timing(&step->timing_step);
    }
ParSHUM_verbose_stop_timing(&verbose->timing_facto_sparse); if (verbose->reason & ParSHUM_reason_dense_too_large) return; ParSHUM_verbose_start_timing(&verbose->timing_convert_schur); ParSHUM_verbose_trace_start_event(verbose, ParSHUM_CONVERT_MATRIX); if ( self->debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM ) { self->A_debug = ParSHUM_dense_2D_permute(ParSHUM_dense_2D_convert_sparse(self->A), self->row_perm, self->col_perm); verbose->schur_density = 1.00; } else { self->S_dense = ParSHUM_schur_matrix_convert(self->S, self->done_pivots, self->col_perm, self->invr_col_perm, self->row_perm, self->invr_row_perm); verbose->schur_density = (double) self->S->nnz / ((n - self->done_pivots) * (m - self->done_pivots)); /* if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL)) { */ /* printf("done pivots = %d \n", self->done_pivots); */ /* ParSHUM_dense_matrix_print(self->S_dense, "dense schur after conversion"); */ /* print_int_array(self->col_perm, self->A->n, "col_perms"); */ /* print_int_array(self->row_perm, self->A->m, "row_perms"); */ /* print_int_array(self->invr_col_perm, self->A->n, "invr_col_perms"); */ /* print_int_array(self->invr_row_perm, self->A->m, "invr_row_perms"); */ /* } */ } ParSHUM_verbose_trace_stop_event(verbose); ParSHUM_verbose_stop_timing(&verbose->timing_convert_schur); ParSHUM_verbose_start_timing(&verbose->timing_facto_dense); ParSHUM_verbose_trace_start_event(verbose, ParSHUM_DENSE_FACTORIZATION); if (self->debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM) { ParSHUM_dense_2D_facto(self->A_debug); } else { ParSHUM_dense_matrix_factorize(self->S_dense, self->BB_cols, exe_parms->nb_threads); self->dense_pivots = self->A->n - self->done_pivots; ParSHUM_verbose_update_dense_pivots(verbose, self->dense_pivots); verbose->nnz_final = self->L->nnz + self->U->nnz + self->D->n + (self->S_dense->n - self->BB_cols) * self->S_dense->m + self->BB_cols * (self->S_dense->m - self->BB_cols); verbose->nnz_L = self->L->nnz + ( self->S_dense->n - 
self->BB_cols ) * self->S_dense->m - ((self->S_dense->n - self->BB_cols)*(self->S_dense->n - self->BB_cols) - self->S_dense->n + self->BB_cols ) / 2;
      /* nnz accounting for U: sparse U + diagonal + the upper triangle of the
         square part of the dense Schur block + the BB (border) columns.
         NOTE(review): mixes ->n and ->m inside the triangle formula — presumably
         relies on the dense block being square up to BB_cols; verify. */
      verbose->nnz_U = self->U->nnz + self->D->n +
        ((self->S_dense->n-self->BB_cols) * (self->S_dense->n-self->BB_cols) - self->S_dense->m + self->BB_cols)/2 +
        self->S_dense->n - self->BB_cols +
        self->BB_cols * (self->S_dense->m - self->BB_cols);
      /* nnz stored for the dense Schur complement itself */
      verbose->nnz_S_dense = (self->S_dense->n - self->BB_cols) * self->S_dense->m +
        self->BB_cols * (self->S_dense->m - self->BB_cols);
    }
  ParSHUM_verbose_trace_stop_event(verbose);
  ParSHUM_verbose_stop_timing(&verbose->timing_facto_dense);
  ParSHUM_verbose_stop_timing(&verbose->timing_facto);
}

/* Solve A x = b in place: on entry RHS holds b, on exit it holds x.
   Uses the factors produced by the factorization phase: sparse L, the dense
   Schur complement S_dense (factorized with PLASMA or LAPACK), then sparse U,
   followed by the row/column permutations.  No-op if the factorization was
   abandoned because the dense part grew too large. */
void
ParSHUM_solver_solve(ParSHUM_solver self, ParSHUM_vector RHS)
{
  if (self->verbose->reason & ParSHUM_reason_dense_too_large)
    return;
  double *RHS_vals = RHS->vect;
  ParSHUM_verbose verbose = self->verbose;

  ParSHUM_verbose_start_timing(&verbose->timing_solve);
  if (self->debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM ) {
    /* debug path: solve with the fully dense copy of A under the same perms */
    ParSHUM_vector_permute(RHS, self->row_perm, self->A_debug->m);
    ParSHUM_dense_2D_solve(self->A_debug, RHS);
    ParSHUM_vector_permute(RHS, self->invr_col_perm, self->A_debug->m);
  } else {
    /* 1) forward solve with the sparse L factor */
    ParSHUM_verbose_start_timing(&verbose->timing_solve_L);
    if (self->debug & ParSHUM_DEBUG_GOSSIP_GIRL)
      ParSHUM_vector_print(RHS, "Init RHS");
    ParSHUM_L_matrix_solve(self->L, RHS, self->row_perm);
    if (self->debug & ParSHUM_DEBUG_GOSSIP_GIRL)
      ParSHUM_vector_print(RHS, "after forward solve");
    ParSHUM_verbose_stop_timing(&verbose->timing_solve_L);

    /* 2) solve with the dense Schur complement (if any) */
    ParSHUM_verbose_start_timing(&verbose->timing_solve_dense);
    if (self->S_dense->n && self->S_dense->m) {
      if (self->S_dense->n == self->S_dense->m) {
        /* square dense block: gather the RHS entries, back-solve with the
           LU factors computed by *getrf, and scatter the result back */
        double *dense_RHS = (double *) *self->workspace;
        ParSHUM_dense_matrix_get_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals, ParSHUM_perm_global);
#ifdef USE_PLASMA
        plasma_dgetrs(self->S_dense->n, 1, self->S_dense->val, self->S_dense->n,
                      self->S_dense->pivots, dense_RHS, self->S_dense->n);
#else
        LAPACKE_dgetrs(LAPACK_COL_MAJOR, 'N', self->S_dense->n, 1, self->S_dense->val, self->S_dense->n,
                       self->S_dense->pivots, dense_RHS, self->S_dense->n);
#endif
        ParSHUM_dense_matrix_update_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals);
      } else if ( self->S_dense->n < self->S_dense->m ) {
        /* rectangular (more rows than columns): L-solve on the square top,
           eliminate the extra rows with a GEMM update, then U-solve.
           NOTE(review): the GEMM leading dimensions use self->S->m while the
           TRSMs use self->S_dense->m — presumably equal here; confirm. */
        int diff_size = self->S_dense->m - self->S_dense->n;
        double *dense_RHS = (double *) *self->workspace;
        ParSHUM_dense_matrix_get_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals, ParSHUM_perm_both);
#ifdef USE_PLASMA
        plasma_dtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                     self->S_dense->n, 1, 1.0, self->S_dense->val,self->S_dense->m, dense_RHS, self->S_dense->m);
        plasma_dgemm(PlasmaNoTrans, PlasmaNoTrans, diff_size, 1, self->S_dense->n, -1.0,
                     &self->S_dense->val[self->S_dense->m - diff_size], self->S_dense->m,
                     dense_RHS, self->S->m, 1.0, &dense_RHS[self->S_dense->n], self->S->m);
        plasma_dtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                     self->S_dense->n, 1, 1.0, self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m);
#else
        cblas_dtrsm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasUnit,
                    self->S_dense->n, 1, 1.0, self->S_dense->val,self->S_dense->m, dense_RHS, self->S_dense->m);
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, diff_size, 1, self->S_dense->n, -1.0,
                    &self->S_dense->val[self->S_dense->m - diff_size], self->S_dense->m,
                    dense_RHS, self->S->m, 1.0, &dense_RHS[self->S_dense->n], self->S->m);
        cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit,
                    self->S_dense->n, 1, 1.0, self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m);
#endif
        ParSHUM_dense_matrix_update_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals);
      } else {
        /* n > m is not supported; NOTE(review): message contains a typo
           ("implemeted") — left as-is, runtime strings are behavior. */
        ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__, "not implemeted");
      }
    }
    if (self->debug & ParSHUM_DEBUG_GOSSIP_GIRL)
      ParSHUM_vector_print(RHS, "after dense solve");
    ParSHUM_verbose_stop_timing(&verbose->timing_solve_dense);

    /* 3) backward solve with the sparse U factor, then undo the permutations */
    ParSHUM_verbose_start_timing(&verbose->timing_solve_U);
    ParSHUM_U_matrix_solve(self->U, self->D, RHS, self->col_perm, self->row_perm, self->dense_pivots);
    if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL))
      ParSHUM_vector_print(RHS, "after backward solve");
    ParSHUM_vector_permute(RHS, self->row_perm, self->S->m);
    if (self->debug & ParSHUM_DEBUG_GOSSIP_GIRL)
      ParSHUM_vector_print(RHS, "P RHS");
    ParSHUM_vector_permute(RHS, self->invr_col_perm, self->S->n);
    if (self->debug & (ParSHUM_DEBUG_VERBOSE_EACH_STEP | ParSHUM_DEBUG_GOSSIP_GIRL))
      ParSHUM_vector_print(RHS, "after solve operation");
    ParSHUM_verbose_stop_timing(&verbose->timing_solve_U);
  }
  ParSHUM_verbose_computed_norms(verbose);
  ParSHUM_verbose_stop_timing(&verbose->timing_solve);
}

/* Commented-out MPI-distributed (SBBD) variant of the solve — kept verbatim. */
/* #include <mpi.h> */
/* void */
/* ParSHUM_solver_SBBD_solve(ParSHUM_solver self, ParSHUM_vector RHS, ParSHUM_vector schur_RHS_v, */
/*                           ParSHUM_dense_matrix global_schur, int *distribution, */
/*                           int **BB_index, int *BB_sizes, ParSHUM_MPI_info MPI_info) */
/* { */
/*   double *RHS_vals = RHS->vect; */
/*   int rank = MPI_info->rank; */
/*   MPI_Comm comm = MPI_info->world; */
/*   int BB_cols = self->BB_cols; */
/*   int nb_blocks = MPI_info->MPI_size; */
/*   int square_n = self->S_dense->n - BB_cols; */
/*   int rest_m = self->S_dense->m - square_n; */
/*   double *dense_RHS = (double *) *self->workspace; */
/*   double *BB_rhs = malloc(BB_cols * sizeof(*BB_rhs)); */
/*   MPI_Status status; */
/*   if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */
/*     ParSHUM_vector_print(RHS, "Init RHS"); */
/*   ParSHUM_L_matrix_solve(self->L, RHS, self->row_perm); */
/*   if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */
/*     ParSHUM_vector_print(RHS, "after forward solve"); */
/*   ParSHUM_dense_matrix_get_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals, ParSHUM_perm_both); */
/* #ifdef USE_PLASMA */
/*   plasma_dtrsm(PlasmaLeft, PlasmaLower, 
PlasmaNoTrans, PlasmaUnit, square_n, 1, 1.0, */ /* self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m); */ /* plasma_dgemm(PlasmaNoTrans, PlasmaNoTrans, rest_m, 1, square_n, -1.0, */ /* &self->S_dense->val[square_n], self->S_dense->m, */ /* dense_RHS, self->S->m, 1.0, &dense_RHS[square_n], self->S->m); */ /* #else */ /* cblas_dtrsm(CblasColMajor, CblasLeft, CblasLower, CblasNoTrans, CblasUnit, square_n, */ /* 1, 1.0, self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m); */ /* cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, rest_m, 1, square_n, -1.0, */ /* &self->S_dense->val[square_n], self->S_dense->m, */ /* dense_RHS, self->S->m, 1.0, &dense_RHS[square_n], self->S->m); */ /* #endif */ /* if (rank == 0) { */ /* int block, i; */ /* double *send_buff; */ /* int max_size = 0; */ /* double *schur_RHS = schur_RHS_v->vect; */ /* memcpy(schur_RHS, &dense_RHS[square_n], rest_m * sizeof(*schur_RHS)); */ /* for( block = 1; block < nb_blocks; block++) { */ /* MPI_Recv(&schur_RHS[distribution[block]], distribution[block+1] - distribution[block], */ /* MPI_DOUBLE, block, 0, comm, &status); */ /* } */ /* #ifdef USE_PLASMA */ /* plasma_dgetrs(global_schur->n, 1, global_schur->val, global_schur->n, */ /* global_schur->pivots, schur_RHS, global_schur->n); */ /* #else */ /* LAPACKE_dgetrs(LAPACK_COL_MAJOR, 'N', global_schur->n, 1, global_schur->val, global_schur->n, global_schur->pivots, schur_RHS, global_schur->n); */ /* #endif */ /* if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */ /* ParSHUM_vector_print(schur_RHS_v, "global schur RHS"); */ /* for( block = 1; block < nb_blocks; block++) { */ /* int local_size = BB_sizes[block]; */ /* max_size = max_size > local_size ? 
max_size : local_size; */ /* } */ /* send_buff = malloc(max_size * sizeof(*send_buff)); */ /* for( block = 1; block < nb_blocks; block++) { */ /* int *indices = BB_index[block]; */ /* for ( i =0; i < BB_sizes[block]; i++) */ /* send_buff[i] = schur_RHS[indices[i]]; */ /* MPI_Send(send_buff, BB_sizes[block], MPI_DOUBLE, block, 0, comm); */ /* } */ /* int *indices = *BB_index; */ /* for(i = 0; i < BB_cols; i++) */ /* BB_rhs[i] = schur_RHS[indices[i]]; */ /* } else { */ /* MPI_Send(&dense_RHS[square_n], rest_m, MPI_DOUBLE, 0, 0, comm); */ /* MPI_Recv(BB_rhs, BB_cols, MPI_DOUBLE, 0, 0, comm, &status); */ /* } */ /* #ifdef USE_PLASMA */ /* plasma_dgemm(PlasmaNoTrans, PlasmaNoTrans, square_n, 1, BB_cols, -1.0, */ /* &self->S_dense->val[square_n*self->S_dense->m], self->S_dense->m, */ /* BB_rhs, BB_cols, 1.0, dense_RHS, self->S->m); */ /* plasma_dtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, square_n, 1, 1.0, */ /* self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m); */ /* #else */ /* cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, square_n, 1, BB_cols, -1.0, */ /* &self->S_dense->val[square_n*self->S_dense->m], self->S_dense->m, */ /* BB_rhs, BB_cols, 1.0, dense_RHS, self->S->m); */ /* cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, square_n, 1, 1.0, */ /* self->S_dense->val, self->S_dense->m, dense_RHS, self->S_dense->m); */ /* #endif */ /* ParSHUM_dense_matrix_update_RHS(self->S_dense, dense_RHS, &self->row_perm[self->done_pivots], RHS_vals); */ /* if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */ /* ParSHUM_vector_print(RHS, "after dense solve"); */ /* ParSHUM_U_BB_matrix_solve(self->U, self->D, RHS, BB_rhs, self->col_perm, self->row_perm, */ /* self->dense_pivots, BB_cols); */ /* if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */ /* ParSHUM_vector_print(RHS, "after backward solve"); */ /* ParSHUM_vector_permute(RHS, self->row_perm, self->S->m); */ /* if (self->debug & 
ParSHUM_DEBUG_VERBOSE_EACH_STEP) */
/*     ParSHUM_vector_print(RHS, "P RHS"); */
/*   ParSHUM_vector_permute(RHS, self->invr_col_perm, self->S->n - BB_cols); */
/*   if (self->debug & ParSHUM_DEBUG_VERBOSE_EACH_STEP) */
/*     ParSHUM_vector_print(RHS, "after solve operation"); */
/* } */

/* Compute the normwise backward error ||A x - b|| / (||A|| ||x|| + ||b||)
   for a computed solution X of A X = rhs, store it in verbose->backward_error
   and print it.  No-op if the factorization was abandoned. */
void
ParSHUM_solver_compute_norms(ParSHUM_solver self, ParSHUM_vector X, ParSHUM_vector rhs)
{
  if (self->verbose->reason & ParSHUM_reason_dense_too_large)
    return;
  double x_norm, A_norm, b_norm;
  ParSHUM_vector r = ParSHUM_vector_create(X->n);

  /* || r = Ax - b || */
  ParSHUM_matrix_SpMV(self->A, X, r);
  /* residual r = rhs - A x (via the vector-add with coefficients 1, -1) */
  ParSHUM_vector_add(r, 1.00, rhs, -1.00, r);
  self->verbose->backward_error = ParSHUM_vector_2norm(r);
  A_norm = ParSHUM_matrix_get_norm(self->A);
  x_norm = ParSHUM_vector_2norm(X);
  b_norm = ParSHUM_vector_2norm(rhs);
  /* scale the residual norm into a relative (normwise) backward error */
  self->verbose->backward_error /= A_norm * x_norm + b_norm;
  printf("error = %e\n", self->verbose->backward_error);

  ParSHUM_vector_destroy(r);
}

/* Commented-out iterative-refinement driver — kept verbatim.
   NOTE(review): it calls ParSHUM_solver_copmpute_norms (typo) — would need
   fixing if ever re-enabled. */
/* void */
/* ParSHUM_solver_iterative_refinement(ParSHUM_solver self, */
/*                                     ParSHUM_vector X, */
/*                                     ParSHUM_vector rhs, */
/*                                     double wanted_precision) */
/* { */
/*   if (self->verbose->reason & ParSHUM_reason_dense_too_large) */
/*     return; */
/*   ParSHUM_vector sol = ParSHUM_vector_create(self->A->n); */
/*   ParSHUM_vector tmp = ParSHUM_vector_create(self->A->n); */
/*   int i = 0; */
/*   ParSHUM_vector_copy(rhs, sol); */
/*   ParSHUM_solver_solve(self, sol); */
/*   ParSHUM_solver_copmpute_norms(self, X, sol, rhs); */
/*   printf("iteration %d: backward error = %e and forward error = %e\n", */
/*          i++, */
/*          self->verbose->backward_error, */
/*          self->verbose->forward_error); */
/*   while ( i < 20 && self->verbose->backward_error > wanted_precision) { */
/*     ParSHUM_matrix_SpMV(self->A, sol, tmp); */
/*     ParSHUM_vector_add(rhs, 1.00, tmp, -1.00, tmp); */
/*     ParSHUM_solver_solve(self, tmp); */
/*     ParSHUM_vector_add(sol, 1.00, tmp, 1.00, sol); */
/*     ParSHUM_solver_copmpute_norms(self, X, sol, rhs); */
/*     printf("iteration %d: backward error = %e and forward error = %e\n", */
/*            i++, */
/*            self->verbose->backward_error, */
/*            self->verbose->forward_error); */
/*   } */
/*   ParSHUM_vector_destroy(sol); */
/*   ParSHUM_vector_destroy(tmp); */
/* } */

/* Shut down optional PLASMA backend (once) and emit the collected statistics.
   Does not free the solver itself — that is ParSHUM_solver_destroy's job. */
void
ParSHUM_solver_finalize(ParSHUM_solver self)
{
#ifdef USE_PLASMA
  /* guard so plasma_finalize is called at most once per process */
  if (is_plasma_init) {
    plasma_finalize();
    is_plasma_init = 0;
  }
#endif
  ParSHUM_verbose_print(self->verbose);
  ParSHUM_verbose_draw_graph(self->verbose);
}

/* Release every resource owned by the solver: factors, Schur matrices,
   candidate/counter scratch structures, permutation arrays and the mutex.
   The debug path owns only the dense copy of A.  Finally frees the solver
   object itself via ParSHUM_solver_dealloc. */
void
ParSHUM_solver_destroy(ParSHUM_solver self)
{
  int i;

  if (self->debug & ParSHUM_CHECK_DENSE_W_ParSHUM_PERM) {
    ParSHUM_dense_2D_destroy(self->A_debug);
  } else {
    ParSHUM_matrix_destroy(self->A);
    ParSHUM_schur_matrix_destroy(self->S);
    ParSHUM_L_matrix_destroy(self->L);
    ParSHUM_matrix_destroy(self->D);
    ParSHUM_U_matrix_destroy(self->U);
    /* S_dense exists only if the factorization reached the dense phase */
    if(self->S_dense)
      ParSHUM_dense_matrix_destroy(self->S_dense);
    free(self->candidates->row);
    free(self->candidates->marko);
    free(self->candidates->best_marko);
    free(self->candidates);
    for(i = 0; i < self->nb_counters; i++) {
      free(self->counters[i]->array);
      free(self->counters[i]->used_counters);
      free(self->counters[i]);
    }
    free(self->counters);
    free(self->U_struct);
    free(self->L_struct);
    pthread_mutex_destroy(&self->counters_lock);
    free(self->row_perm);
    free(self->col_perm);
    free(self->invr_row_perm);
    free(self->invr_col_perm);
    free(self->random_col);
    free(self->previous_pivots);
    ParSHUM_Luby_destroy(self->Luby);
  }
  ParSHUM_solver_dealloc(self);
}
GB_unop__identity_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_uint32)
// op(A') function:  GB (_unop_tran__identity_uint8_uint32)

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32 value truncated to uint8)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint8_uint32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via a template include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif