source
stringlengths
3
92
c
stringlengths
26
2.25M
vednnLinearBackwardWeight.c
#include "vednnLinearBackwardWeight.h" #include "vednn-def.h" static inline vednnError_t vednnLinearBackwardWeight_wrapper( vednnLinearBackwardWeight_t pFunc, VEDNN_LINEARBKW_ARGS ) { #ifndef VEDNN_USE_OPENMP return pFunc(VEDNN_LINEARBKW_ARGS_LIST); #else if ( __vednn_omp_num_threads == 1 ) { return pFunc(VEDNN_LINEARBKW_ARGS_LIST, 0, inDim); } else { vednnError_t rc = VEDNN_SUCCESS ; #pragma omp parallel reduction(|:rc) { int64_t nthreads = omp_get_num_threads() ; int64_t threadid = omp_get_thread_num() ; int64_t nInDim = inDim / nthreads ; int64_t remain = inDim % nthreads ; int64_t inDimBegin = nInDim * threadid + ( threadid < remain ? threadid : remain ) ; int64_t myInDim = nInDim + ( threadid < remain ? 1 : 0 ) ; if( myInDim == 0 ) { rc |= VEDNN_SUCCESS ; } else { rc |= pFunc(VEDNN_LINEARBKW_ARGS_LIST, inDimBegin, inDimBegin+myInDim) ; } } return rc ; } #endif } /* ----------------------------------------------------------------------- */ vednnError_t vednnLinearBackwardWeight( VEDNN_LINEARBKW_ARGS ) { #define OMPWRAP( IMPL ) WRAP_RET(vednnLinearBackwardWeight_##IMPL, \ vednnLinearBackwardWeight_wrapper, VEDNN_LINEARBKW_ARGS_LIST ) if( (outDim & 0x01) == 0 && (((uint64_t)pDataGradWeight) & 0x07) == 0 && (((uint64_t)pDataGradOut) & 0x07) == 0 ) { if( outDim <= 128 ) OMPWRAP(o2XU128_woaligned); else OMPWRAP(o2X_woaligned); } else OMPWRAP(default); #undef OMPWRAP } // vim: et sw=2 ts=2 ai
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
AVX-512BWsearch.c
#include "charSubmat.h" #include "AVX-512BWsearch.h" #include "utils.h" // search using AVX512BW instructions and Score Profile technique void search_avx512bw_sp (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp, char * vect_sequences_db, unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks, unsigned long int vect_sequences_db_count, unsigned long int * vect_sequences_db_disp, char * submat, int open_gap, int extend_gap, int n_threads, int block_size, int * scores, double * workTime){ long int i, j, k; double tick; char *a, * b; unsigned int * a_disp; unsigned long int * b_disp = NULL; unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length; a = query_sequences; m = query_sequences_lengths; a_disp = query_disp; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1]; b = vect_sequences_db; n = vect_sequences_db_lengths; nbbs = vect_sequences_db_blocks; b_disp = vect_sequences_db_disp; tick = dwalltime(); #pragma omp parallel default(none) shared(block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length) num_threads(n_threads) { __m512i *row1, *row2, *tmp_row, *maxCol, *maxRow, *lastCol, * ptr_scores; __m512i *ptr_scoreProfile1, *ptr_scoreProfile2, *ptr_scoreProfile3, *ptr_scoreProfile4; char * ptr_a, * ptr_b, * scoreProfile; __declspec(align(MEMALIGN)) __m512i score, auxLastCol, b_values, submat_lo, submat_hi; __declspec(align(MEMALIGN)) __m512i current1, current2, current3, current4, previous2, previous3, previous4; __declspec(align(MEMALIGN)) __m512i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7, aux8; __declspec(align(MEMALIGN)) __m512i vextend_gap_epi8 = 
_mm512_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm512_set1_epi8(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vextend_gap_epi16 = _mm512_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm512_set1_epi16(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vextend_gap_epi32 = _mm512_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm512_set1_epi32(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vzero = _mm512_setzero_si512(); // SP __declspec(align(MEMALIGN)) __m512i v15 = _mm512_set1_epi8(15), vneg32 = _mm512_set1_epi8(-32), v16 = _mm512_set1_epi8(16); // overflow detection __declspec(align(MEMALIGN)) __m256i v127 = _mm256_set1_epi8(127), v32767 = _mm256_set1_epi16(32767), aux256[2]; // bias __declspec(align(MEMALIGN)) __m512i v128 = _mm512_set1_epi32(128), v32768 = _mm512_set1_epi32(32768); __declspec(align(MEMALIGN)) __m128i aux128, auxBlosum[2], *tmp; __mmask64 mask; unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb; unsigned long int t, s, q; int overflow_low_flag, overflow_high_flag, bb1, bb2, bb1_start, bb1_end, bb2_start, bb2_end; // allocate memory for auxiliary buffers row1 = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); row2 = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); maxCol = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); maxRow = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), MEMALIGN); lastCol = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), MEMALIGN); scoreProfile = (char *) _mm_malloc((SUBMAT_ROWS_x_AVX512BW_INT8_VECTOR_LENGTH*block_size)*sizeof(char), MEMALIGN); // calculate alignment score #pragma omp for schedule(dynamic) nowait for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) { q = (query_sequences_count-1) - (t % query_sequences_count); s = (vect_sequences_db_count-1) - (t / query_sequences_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = 
(__m512i *) (scores + (q*vect_sequences_db_count+s)*AVX512BW_INT8_VECTOR_LENGTH); // calculate number of blocks nbb = nbbs[s]; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_set1_epi8(-128); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_set1_epi8(-128); // set score to 0 score = _mm512_set1_epi8(-128); for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_set1_epi8(-128); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_set1_epi8(-128); auxLastCol = _mm512_set1_epi8(-128); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm512_loadu_si512((__m512i *) (ptr_b + (disp_4+i)*AVX512BW_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm512_sub_epi8(b_values, v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(b_values,v16); aux4 = _mm512_mask_mov_epi8(vneg32, mask, b_values); ptr_scoreProfile1 = (__m512i *)(scoreProfile) + i; #pragma unroll for (j=0; j< SUBMAT_ROWS-1; j++) { tmp = (__m128i*) (submat + j*SUBMAT_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); submat_lo = _mm512_broadcast_i64x2(auxBlosum[0]); submat_hi = _mm512_broadcast_i64x2(auxBlosum[1]); aux5 = _mm512_shuffle_epi8(submat_lo,aux4); aux6 = _mm512_shuffle_epi8(submat_hi,aux1); _mm512_store_si512(ptr_scoreProfile1+j*dim1,_mm512_or_si512(aux5,aux6)); } _mm512_store_si512(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // calculate score profile displacement 
ptr_scoreProfile1 = (__m512i*)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1); ptr_scoreProfile2 = (__m512i*)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1); ptr_scoreProfile3 = (__m512i*)(scoreProfile+((unsigned int)(ptr_a[i+2]))*disp_1); ptr_scoreProfile4 = (__m512i*)(scoreProfile+((unsigned int)(ptr_a[i+3]))*disp_1); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current1 = _mm512_adds_epi8(row1[j-1], _mm512_load_si512(ptr_scoreProfile1+(j-1))); // calculate current1 max value current1 = _mm512_max_epi8(current1, aux1); current1 = _mm512_max_epi8(current1, maxCol[j]); //current1 = _mm512_max_epi8(current1, vzero); // update maxRow and maxCol aux1 = _mm512_subs_epi8(aux1, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current1, vopen_extend_gap_epi8); aux1 = _mm512_max_epi8(aux1, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current1); //calcuate the diagonal value current2 = _mm512_adds_epi8(previous2, _mm512_load_si512(ptr_scoreProfile2+(j-1))); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi8(current2, aux2); current2 = _mm512_max_epi8(current2, maxCol[j]); //current2 = _mm512_max_epi8(current2, vzero); // update maxRow and maxCol aux2 = _mm512_subs_epi8(aux2, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current2, vopen_extend_gap_epi8); aux2 = _mm512_max_epi8(aux2, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current2); //calcuate the diagonal value current3 = _mm512_adds_epi8(previous3, _mm512_load_si512(ptr_scoreProfile3+(j-1))); // update previous previous3 = 
current2; // calculate current3 max value current3 = _mm512_max_epi8(current3, aux3); current3 = _mm512_max_epi8(current3, maxCol[j]); //current3 = _mm512_max_epi8(current3, vzero); // update maxRow and maxCol aux3 = _mm512_subs_epi8(aux3, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current3, vopen_extend_gap_epi8); aux3 = _mm512_max_epi8(aux3, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current3); //calcuate the diagonal value current4 = _mm512_adds_epi8(previous4, _mm512_load_si512(ptr_scoreProfile4+(j-1))); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi8(current4, aux4); current4 = _mm512_max_epi8(current4, maxCol[j]); //current4 = _mm512_max_epi8(current4, vzero); // update maxRow and maxCol aux4 = _mm512_subs_epi8(aux4, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current4, vopen_extend_gap_epi8); aux4 = _mm512_max_epi8(aux4, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi8(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; row1 = row2; row2 = tmp_row; } } // store max value #pragma unroll for (i=0; i < 4; i++){ aux128 = _mm512_extracti32x4_epi32 (score, i); aux1 = _mm512_add_epi32(_mm512_cvtepi8_epi32(aux128),v128); _mm512_store_si512 (ptr_scores+i, aux1); } // overflow detection // low aux256[0] = _mm512_extracti32x8_epi32 (score,0); aux256[1] = _mm256_cmpeq_epi8(aux256[0],v127); overflow_low_flag = _mm256_testz_si256(aux256[1],v127); // high aux256[0] = _mm512_extracti32x8_epi32 (score,1); aux256[1] = 
_mm256_cmpeq_epi8(aux256[0],v127); overflow_high_flag = _mm256_testz_si256(aux256[1],v127); // if overflow if ((overflow_low_flag == 0) || (overflow_high_flag == 0)){ bb1_start = overflow_low_flag; bb1_end = 2-overflow_high_flag; // recalculate using 16-bit signed integer precision for (bb1=bb1_start; bb1<bb1_end ; bb1++){ // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_set1_epi16(-32768); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_set1_epi16(-32768); // set score to 0 score = _mm512_set1_epi16(-32768); disp_2 = bb1*AVX512BW_INT16_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_set1_epi16(-32768); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_set1_epi16(-32768); auxLastCol = _mm512_set1_epi16(-32768); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm512_loadu_si512((__m512i *) (ptr_b + (disp_4+i)*AVX512BW_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm512_sub_epi8(b_values, v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(b_values,v16); aux4 = _mm512_mask_mov_epi8(vneg32, mask, b_values); ptr_scoreProfile1 = (__m512i *)(scoreProfile) + i; #pragma unroll for (j=0; j< SUBMAT_ROWS-1; j++) { tmp = (__m128i*) (submat + j*SUBMAT_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); submat_lo = _mm512_broadcast_i64x2(auxBlosum[0]); submat_hi = _mm512_broadcast_i64x2(auxBlosum[1]); aux5 = _mm512_shuffle_epi8(submat_lo,aux4); aux6 = _mm512_shuffle_epi8(submat_hi,aux1); _mm512_store_si512(ptr_scoreProfile1+j*dim1,_mm512_or_si512(aux5,aux6)); } 
_mm512_store_si512(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // calculate score profile displacement ptr_scoreProfile1 = (__m512i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2); ptr_scoreProfile2 = (__m512i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2); ptr_scoreProfile3 = (__m512i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1+disp_2); ptr_scoreProfile4 = (__m512i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1+disp_2); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current1 = _mm512_adds_epi16(row1[j-1], _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (ptr_scoreProfile1+(j-1))))); // calculate current1 max value current1 = _mm512_max_epi16(current1, aux1); current1 = _mm512_max_epi16(current1, maxCol[j]); //current1 = _mm512_max_epi16(current1, vzero); // update maxRow and maxCol aux1 = _mm512_subs_epi16(aux1, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current1, vopen_extend_gap_epi16); aux1 = _mm512_max_epi16(aux1, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current1); //calcuate the diagonal value current2 = _mm512_adds_epi16(previous2, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (ptr_scoreProfile2+(j-1))))); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi16(current2, aux2); current2 = _mm512_max_epi16(current2, maxCol[j]); //current2 = _mm512_max_epi16(current2, vzero); // update maxRow and maxCol aux2 = _mm512_subs_epi16(aux2, vextend_gap_epi16); maxCol[j] = 
_mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current2, vopen_extend_gap_epi16); aux2 = _mm512_max_epi16(aux2, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current2); //calcuate the diagonal value current3 = _mm512_adds_epi16(previous3, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (ptr_scoreProfile3+(j-1))))); // update previous previous3 = current2; // calculate current3 max value current3 = _mm512_max_epi16(current3, aux3); current3 = _mm512_max_epi16(current3, maxCol[j]); //current3 = _mm512_max_epi16(current3, vzero); // update maxRow and maxCol aux3 = _mm512_subs_epi16(aux3, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current3, vopen_extend_gap_epi16); aux3 = _mm512_max_epi16(aux3, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current3); //calcuate the diagonal value current4 = _mm512_adds_epi16(previous4, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (ptr_scoreProfile4+(j-1))))); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi16(current4, aux4); current4 = _mm512_max_epi16(current4, maxCol[j]); //current4 = _mm512_max_epi16(current4, vzero); // update maxRow and maxCol aux4 = _mm512_subs_epi16(aux4, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current4, vopen_extend_gap_epi16); aux4 = _mm512_max_epi16(aux4, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi16(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; 
row1 = row2; row2 = tmp_row; } } // store max value (low) aux256[0] = _mm512_extracti32x8_epi32 (score,0); aux1 = _mm512_add_epi32(_mm512_cvtepi16_epi32(aux256[0]),v32768); _mm512_store_si512 (ptr_scores+bb1*2,aux1); // check overflow (low) aux256[1] = _mm256_cmpeq_epi16(aux256[0],v32767); overflow_low_flag = _mm256_testz_si256(aux256[1],v127); // store max value (high) aux256[0] = _mm512_extracti32x8_epi32 (score,1); aux1 = _mm512_add_epi32(_mm512_cvtepi16_epi32(aux256[0]),v32768); _mm512_store_si512 (ptr_scores+bb1*2+1,aux1); // check overflow (high) aux256[1] = _mm256_cmpeq_epi16(aux256[0],v32767); overflow_high_flag = _mm256_testz_si256(aux256[1],v32767); // if overflow if ((overflow_low_flag == 0) || (overflow_high_flag == 0)){ // check overflow in low 16 bits bb2_start = overflow_low_flag; // check overflow in high 16 bits bb2_end = 2 - overflow_high_flag; // recalculate using 32-bit signed integer precision for (bb2=bb2_start; bb2<bb2_end ; bb2++){ // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_si512(); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_si512(); // set score to 0 score = _mm512_setzero_si512(); disp_3 = disp_2 + bb2*AVX512BW_INT32_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? 
block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_si512(); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_setzero_si512(); auxLastCol = _mm512_setzero_si512(); // build score profile for (i=0; i< dim1 ;i++ ) { // indexes b_values = _mm512_loadu_si512((__m512i *) (ptr_b + (disp_4+i)*AVX512BW_INT8_VECTOR_LENGTH)); // indexes >= 16 aux1 = _mm512_sub_epi8(b_values, v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(b_values,v16); aux4 = _mm512_mask_mov_epi8(vneg32, mask, b_values); ptr_scoreProfile1 = (__m512i *)(scoreProfile) + i; #pragma unroll for (j=0; j< SUBMAT_ROWS-1; j++) { tmp = (__m128i*) (submat + j*SUBMAT_COLS); auxBlosum[0] = _mm_load_si128(tmp); auxBlosum[1] = _mm_load_si128(tmp+1); submat_lo = _mm512_broadcast_i64x2(auxBlosum[0]); submat_hi = _mm512_broadcast_i64x2(auxBlosum[1]); aux5 = _mm512_shuffle_epi8(submat_lo,aux4); aux6 = _mm512_shuffle_epi8(submat_hi,aux1); _mm512_store_si512(ptr_scoreProfile1+j*dim1,_mm512_or_si512(aux5,aux6)); } _mm512_store_si512(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // calculate score profile displacement ptr_scoreProfile1 = (__m512i *)(scoreProfile+((unsigned int)(ptr_a[i]))*disp_1+disp_3); ptr_scoreProfile2 = (__m512i *)(scoreProfile+((unsigned int)(ptr_a[i+1]))*disp_1+disp_3); ptr_scoreProfile3 = (__m512i *)(scoreProfile+((unsigned int)(ptr_a[i+2]))*disp_1+disp_3); ptr_scoreProfile4 = (__m512i *)(scoreProfile+((unsigned int)(ptr_a[i+3]))*disp_1+disp_3); // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma 
unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the diagonal value current1 = _mm512_add_epi32(row1[j-1], _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile1+(j-1))))); // calculate current1 max value current1 = _mm512_max_epi32(current1, aux1); current1 = _mm512_max_epi32(current1, maxCol[j]); current1 = _mm512_max_epi32(current1, vzero); // update maxRow and maxCol aux1 = _mm512_sub_epi32(aux1, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current1, vopen_extend_gap_epi32); aux1 = _mm512_max_epi32(aux1, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = _mm512_max_epi32(score,current1); //calcuate the diagonal value current2 = _mm512_add_epi32(previous2, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile2+(j-1))))); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi32(current2, aux2); current2 = _mm512_max_epi32(current2, maxCol[j]); current2 = _mm512_max_epi32(current2, vzero); // update maxRow and maxCol aux2 = _mm512_sub_epi32(aux2, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current2, vopen_extend_gap_epi32); aux2 = _mm512_max_epi32(aux2, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = _mm512_max_epi32(score,current2); //calcuate the diagonal value current3 = _mm512_add_epi32(previous3, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile3+(j-1))))); // update previous previous3 = current2; // calculate current3 max value current3 = _mm512_max_epi32(current3, aux3); current3 = _mm512_max_epi32(current3, maxCol[j]); current3 = _mm512_max_epi32(current3, vzero); // update maxRow and maxCol aux3 = _mm512_sub_epi32(aux3, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current3, 
vopen_extend_gap_epi32); aux3 = _mm512_max_epi32(aux3, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = _mm512_max_epi32(score,current3); //calcuate the diagonal value current4 = _mm512_add_epi32(previous4, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (ptr_scoreProfile4+(j-1))))); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi32(current4, aux4); current4 = _mm512_max_epi32(current4, maxCol[j]); current4 = _mm512_max_epi32(current4, vzero); // update maxRow and maxCol aux4 = _mm512_sub_epi32(aux4, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current4, vopen_extend_gap_epi32); aux4 = _mm512_max_epi32(aux4, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi32(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; row1 = row2; row2 = tmp_row; } } // store max value _mm512_store_si512 (ptr_scores+bb1*2+bb2,score); } } } } } _mm_free(row1); _mm_free(row2); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); _mm_free(scoreProfile); } *workTime = dwalltime()-tick; } // search using AVX512BW instructions and Query Profile technique void search_avx512bw_qp (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp, char * vect_sequences_db, unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks, unsigned long int vect_sequences_db_count, unsigned long int * vect_sequences_db_disp, char * submat, int open_gap, int extend_gap, int n_threads, int block_size, int * scores, double * workTime){ long 
int i, j, k; double tick; char *a, * b; unsigned int * a_disp; unsigned long int * b_disp = NULL; unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length; __m512i * queryProfiles; a = query_sequences; m = query_sequences_lengths; a_disp = query_disp; query_sequences_max_length = query_sequences_lengths[query_sequences_count-1]; sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1]; b = vect_sequences_db; n = vect_sequences_db_lengths; nbbs = vect_sequences_db_blocks; b_disp = vect_sequences_db_disp; // allocate memory for query profiles (if correspond) queryProfiles = (__m512i *)_mm_malloc((a_disp[query_sequences_count])*2*sizeof(__m512i), MEMALIGN); tick = dwalltime(); #pragma omp parallel default(none) shared(block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, \ open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length, queryProfiles) num_threads(n_threads) { __m512i *row1, *row2, *tmp_row, *maxCol, *maxRow, *lastCol, * ptr_scores, * ptr_b_block, *queryProfile; __declspec(align(MEMALIGN)) char * ptr_a, * ptr_b, submatValues[AVX512BW_INT8_VECTOR_LENGTH]; __declspec(align(MEMALIGN)) __m512i score, auxLastCol, *bIndexes_lo, *bIndexes_hi; __declspec(align(MEMALIGN)) __m512i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7; __declspec(align(MEMALIGN)) __m512i current1, current2, current3, current4, previous2, previous3, previous4; __declspec(align(MEMALIGN)) __m512i vextend_gap_epi8 = _mm512_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm512_set1_epi8(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vextend_gap_epi16 = _mm512_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm512_set1_epi16(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vextend_gap_epi32 = _mm512_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm512_set1_epi32(open_gap+extend_gap); __declspec(align(MEMALIGN)) __m512i vzero = _mm512_setzero_si512(); 
// QP __declspec(align(MEMALIGN)) __m512i v15 = _mm512_set1_epi8(15), vneg32 = _mm512_set1_epi8(-32), v16 = _mm512_set1_epi8(16); // overflow detection __declspec(align(MEMALIGN)) __m256i v127 = _mm256_set1_epi8(127), v32767 = _mm256_set1_epi16(32767), aux256[2]; // bias __declspec(align(MEMALIGN)) __m512i v128 = _mm512_set1_epi32(128), v32768 = _mm512_set1_epi32(32768); __declspec(align(MEMALIGN)) __m128i aux128, auxBlosum[2], *tmp; __mmask64 mask; #pragma omp for schedule(dynamic) for (i=0; i< a_disp[query_sequences_count] ; i++) { queryProfiles[i*2] = _mm512_broadcast_i64x2(_mm_loadu_si128((__m128i*)(submat)+a[i]*2)); queryProfiles[i*2+1] = _mm512_broadcast_i64x2(_mm_loadu_si128((__m128i*)(submat)+a[i]*2+1)); } unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, dim1, dim2, nbb; unsigned long int t, s, q; int overflow_low_flag, overflow_high_flag, bb1, bb2, bb1_start, bb1_end, bb2_start, bb2_end; // allocate memory for auxiliary buffers row1 = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); row2 = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); maxCol = (__m512i *) _mm_malloc((block_size+1)*sizeof(__m512i), MEMALIGN); maxRow = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), MEMALIGN); lastCol = (__m512i *) _mm_malloc((query_sequences_max_length)*sizeof(__m512i), MEMALIGN); // alloc memory for indexes bIndexes_lo = (__m512i *) _mm_malloc((block_size)*sizeof(__m512i), MEMALIGN); bIndexes_hi = (__m512i *) _mm_malloc((block_size)*sizeof(__m512i), MEMALIGN); // calculate alignment score #pragma omp for schedule(dynamic) nowait for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) { q = (query_sequences_count-1) - (t % query_sequences_count); s = (vect_sequences_db_count-1) - (t / query_sequences_count); ptr_a = a + a_disp[q]; ptr_b = b + b_disp[s]; ptr_scores = (__m512i *) (scores + (q*vect_sequences_db_count+s)*AVX512BW_INT8_VECTOR_LENGTH); queryProfile = queryProfiles + a_disp[q]*2; // 
calculate number of blocks nbb = nbbs[s]; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_set1_epi8(-128); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_set1_epi8(-128); // set score to 0 score = _mm512_set1_epi8(-128); for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_set1_epi8(-128); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_set1_epi8(-128); auxLastCol = _mm512_set1_epi8(-128); // get bIndexes ptr_b_block = (__m512i*)(ptr_b) + disp_4; #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) { // indexes >= 16 bIndexes_hi[i] = _mm512_sub_epi8(ptr_b_block[i], v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(ptr_b_block[i],v16); bIndexes_lo[i] = _mm512_mask_mov_epi8(vneg32, mask, ptr_b_block[i]); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[i*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[i*2+1],bIndexes_hi[j-1]); //calcuate the diagonal value current1 = _mm512_adds_epi8(row1[j-1], _mm512_or_si512(aux5,aux6)); // calculate current1 max value current1 = _mm512_max_epi8(current1, aux1); current1 = _mm512_max_epi8(current1, maxCol[j]); //current1 = _mm512_max_epi8(current1, 
vzero); // update maxRow and maxCol aux1 = _mm512_subs_epi8(aux1, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current1, vopen_extend_gap_epi8); aux1 = _mm512_max_epi8(aux1, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current1); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+1)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+1)*2+1],bIndexes_hi[j-1]); //calcuate the diagonal value current2 = _mm512_adds_epi8(previous2, _mm512_or_si512(aux5,aux6)); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi8(current2, aux2); current2 = _mm512_max_epi8(current2, maxCol[j]); //current2 = _mm512_max_epi8(current2, vzero); // update maxRow and maxCol aux2 = _mm512_subs_epi8(aux2, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current2, vopen_extend_gap_epi8); aux2 = _mm512_max_epi8(aux2, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current2); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+2)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+2)*2+1],bIndexes_hi[j-1]); //calcuate the diagonal value current3 = _mm512_adds_epi8(previous3, _mm512_or_si512(aux5,aux6)); // update previous previous3 = current2; // calculate current3 max value current3 = _mm512_max_epi8(current3, aux3); current3 = _mm512_max_epi8(current3, maxCol[j]); //current3 = _mm512_max_epi8(current3, vzero); // update maxRow and maxCol aux3 = _mm512_subs_epi8(aux3, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current3, vopen_extend_gap_epi8); aux3 = _mm512_max_epi8(aux3, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update max score score = _mm512_max_epi8(score,current3); //calcuate 
the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+3)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+3)*2+1],bIndexes_hi[j-1]); //calcuate the diagonal value current4 = _mm512_adds_epi8(previous4, _mm512_or_si512(aux5,aux6)); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi8(current4, aux4); current4 = _mm512_max_epi8(current4, maxCol[j]); //current4 = _mm512_max_epi8(current4, vzero); // update maxRow and maxCol aux4 = _mm512_subs_epi8(aux4, vextend_gap_epi8); maxCol[j] = _mm512_subs_epi8(maxCol[j], vextend_gap_epi8); aux0 = _mm512_subs_epi8(current4, vopen_extend_gap_epi8); aux4 = _mm512_max_epi8(aux4, aux0); maxCol[j] = _mm512_max_epi8(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi8(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; row1 = row2; row2 = tmp_row; } } // store max value #pragma unroll for (i=0; i < 4; i++){ aux128 = _mm512_extracti32x4_epi32 (score, i); aux1 = _mm512_add_epi32(_mm512_cvtepi8_epi32(aux128),v128); _mm512_store_si512 (ptr_scores+i, aux1); } // overflow detection // low aux256[0] = _mm512_extracti32x8_epi32 (score,0); aux256[1] = _mm256_cmpeq_epi8(aux256[0],v127); overflow_low_flag = _mm256_testz_si256(aux256[1],v127); // high aux256[0] = _mm512_extracti32x8_epi32 (score,1); aux256[1] = _mm256_cmpeq_epi8(aux256[0],v127); overflow_high_flag = _mm256_testz_si256(aux256[1],v127); // if overflow if ((overflow_low_flag == 0) || (overflow_high_flag == 0)){ bb1_start = overflow_low_flag; bb1_end = 2-overflow_high_flag; // recalculate using 16-bit signed integer precision for (bb1=bb1_start; bb1<bb1_end ; bb1++){ // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for 
(i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_set1_epi16(-32768); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_set1_epi16(-32768); // set score to 0 score = _mm512_set1_epi16(-32768); disp_2 = bb1*AVX512BW_INT16_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_set1_epi16(-32768); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_set1_epi16(-32768); auxLastCol = _mm512_set1_epi16(-32768); // get bIndexes ptr_b_block = (__m512i*)(ptr_b) + disp_4; #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) { // indexes >= 16 bIndexes_hi[i] = _mm512_sub_epi8(ptr_b_block[i], v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(ptr_b_block[i],v16); bIndexes_lo[i] = _mm512_mask_mov_epi8(vneg32, mask, ptr_b_block[i]); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[i*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[i*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current1 = _mm512_adds_epi16(row1[j-1], _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (submatValues+disp_2)))); // calculate current1 max value current1 = _mm512_max_epi16(current1, aux1); current1 = 
_mm512_max_epi16(current1, maxCol[j]); //current1 = _mm512_max_epi16(current1, vzero); // update maxRow and maxCol aux1 = _mm512_subs_epi16(aux1, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current1, vopen_extend_gap_epi16); aux1 = _mm512_max_epi16(aux1, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current1); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+1)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+1)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current2 = _mm512_adds_epi16(previous2, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (submatValues+disp_2)))); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi16(current2, aux2); current2 = _mm512_max_epi16(current2, maxCol[j]); //current2 = _mm512_max_epi16(current2, vzero); // update maxRow and maxCol aux2 = _mm512_subs_epi16(aux2, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current2, vopen_extend_gap_epi16); aux2 = _mm512_max_epi16(aux2, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current2); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+2)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+2)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current3 = _mm512_adds_epi16(previous3, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (submatValues+disp_2)))); // update previous previous3 = current2; // calculate current3 max value current3 = _mm512_max_epi16(current3, aux3); current3 = _mm512_max_epi16(current3, maxCol[j]); //current3 = _mm512_max_epi16(current3, vzero); // update maxRow and 
maxCol aux3 = _mm512_subs_epi16(aux3, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current3, vopen_extend_gap_epi16); aux3 = _mm512_max_epi16(aux3, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update max score score = _mm512_max_epi16(score,current3); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+3)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+3)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current4 = _mm512_adds_epi16(previous4, _mm512_cvtepi8_epi16(_mm256_loadu_si256((__m256i *) (submatValues+disp_2)))); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi16(current4, aux4); current4 = _mm512_max_epi16(current4, maxCol[j]); //current4 = _mm512_max_epi16(current4, vzero); // update maxRow and maxCol aux4 = _mm512_subs_epi16(aux4, vextend_gap_epi16); maxCol[j] = _mm512_subs_epi16(maxCol[j], vextend_gap_epi16); aux0 = _mm512_subs_epi16(current4, vopen_extend_gap_epi16); aux4 = _mm512_max_epi16(aux4, aux0); maxCol[j] = _mm512_max_epi16(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi16(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] = aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; row1 = row2; row2 = tmp_row; } } // store max value (low) aux256[0] = _mm512_extracti32x8_epi32 (score,0); aux1 = _mm512_add_epi32(_mm512_cvtepi16_epi32(aux256[0]),v32768); _mm512_store_si512 (ptr_scores+bb1*2,aux1); // check overflow (low) aux256[1] = _mm256_cmpeq_epi16(aux256[0],v32767); overflow_low_flag = _mm256_testz_si256(aux256[1],v127); // store max value (high) aux256[0] = _mm512_extracti32x8_epi32 
(score,1); aux1 = _mm512_add_epi32(_mm512_cvtepi16_epi32(aux256[0]),v32768); _mm512_store_si512 (ptr_scores+bb1*2+1,aux1); // check overflow (high) aux256[1] = _mm256_cmpeq_epi16(aux256[0],v32767); overflow_high_flag = _mm256_testz_si256(aux256[1],v32767); // if overflow if ((overflow_low_flag == 0) || (overflow_high_flag == 0)){ // check overflow in low 16 bits bb2_start = overflow_low_flag; // check overflow in high 16 bits bb2_end = 2 - overflow_high_flag; // recalculate using 32-bit signed integer precision for (bb2=bb2_start; bb2<bb2_end ; bb2++){ // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm512_setzero_si512(); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm512_setzero_si512(); // set score to 0 score = _mm512_setzero_si512(); disp_3 = disp_2 + bb2*AVX512BW_INT32_VECTOR_LENGTH; for (k=0; k < nbb; k++){ // calculate dim1 disp_4 = k*block_size; dim1 = n[s]-disp_4; dim1 = (block_size < dim1 ? block_size : dim1); // calculate dim2 dim2 = dim1 / DB_SEQ_LEN_MULT; // calculate SP sub-block length disp_1 = dim1*AVX512BW_INT8_VECTOR_LENGTH; // init buffers #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm512_setzero_si512(); #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm512_setzero_si512(); auxLastCol = _mm512_setzero_si512(); // get bIndexes ptr_b_block = (__m512i*)(ptr_b) + disp_4; #pragma unroll(AVX512BW_UNROLL_COUNT) for (i=0; i<dim1 ; i++ ) { // indexes >= 16 bIndexes_hi[i] = _mm512_sub_epi8(ptr_b_block[i], v16); // indexes < 16 mask = _mm512_cmplt_epi8_mask(ptr_b_block[i],v16); bIndexes_lo[i] = _mm512_mask_mov_epi8(vneg32, mask, ptr_b_block[i]); } for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){ // update row[0] with lastCol[i-1] row1[0] = lastCol[i]; previous2 = lastCol[i+1]; previous3 = lastCol[i+2]; previous4 = lastCol[i+3]; // store maxRow in auxiliars aux1 = maxRow[i]; aux2 = maxRow[i+1]; aux3 = maxRow[i+2]; aux4 = 
maxRow[i+3]; for (ii=0; ii<dim2 ; ii++) { #pragma unroll(DB_SEQ_LEN_MULT) for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) { //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current1 = _mm512_add_epi32(row1[j-1], _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (submatValues+disp_3)))); // update previous previous2 = current1; // calculate current1 max value current1 = _mm512_max_epi32(current1, aux1); current1 = _mm512_max_epi32(current1, maxCol[j]); current1 = _mm512_max_epi32(current1, vzero); // update maxRow and maxCol aux1 = _mm512_sub_epi32(aux1, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current1, vopen_extend_gap_epi32); aux1 = _mm512_max_epi32(aux1, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = _mm512_max_epi32(score,current1); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+1)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+1)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current2 = _mm512_add_epi32(previous2, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (submatValues+disp_3)))); // update previous previous2 = current1; // calculate current2 max value current2 = _mm512_max_epi32(current2, aux2); current2 = _mm512_max_epi32(current2, maxCol[j]); current2 = _mm512_max_epi32(current2, vzero); // update maxRow and maxCol aux2 = _mm512_sub_epi32(aux2, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current2, vopen_extend_gap_epi32); aux2 = _mm512_max_epi32(aux2, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = 
_mm512_max_epi32(score,current2); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+2)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+2)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current3 = _mm512_add_epi32(previous3, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (submatValues+disp_3)))); // update previous previous3 = current2; // calculate current3 max value current3 = _mm512_max_epi32(current3, aux3); current3 = _mm512_max_epi32(current3, maxCol[j]); current3 = _mm512_max_epi32(current3, vzero); // update maxRow and maxCol aux3 = _mm512_sub_epi32(aux3, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current3, vopen_extend_gap_epi32); aux3 = _mm512_max_epi32(aux3, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update max score score = _mm512_max_epi32(score,current3); //calcuate the scoring matrix value aux5 = _mm512_shuffle_epi8(queryProfile[(i+3)*2],bIndexes_lo[j-1]); aux6 = _mm512_shuffle_epi8(queryProfile[(i+3)*2+1],bIndexes_hi[j-1]); _mm512_store_si512(submatValues, _mm512_or_si512(aux5,aux6)); //calcuate the diagonal value current4 = _mm512_add_epi32(previous4, _mm512_cvtepi8_epi32(_mm_loadu_si128((__m128i *) (submatValues+disp_3)))); // update previous previous4 = current3; // calculate current4 max value current4 = _mm512_max_epi32(current4, aux4); current4 = _mm512_max_epi32(current4, maxCol[j]); current4 = _mm512_max_epi32(current4, vzero); // update maxRow and maxCol aux4 = _mm512_sub_epi32(aux4, vextend_gap_epi32); maxCol[j] = _mm512_sub_epi32(maxCol[j], vextend_gap_epi32); aux0 = _mm512_sub_epi32(current4, vopen_extend_gap_epi32); aux4 = _mm512_max_epi32(aux4, aux0); maxCol[j] = _mm512_max_epi32(maxCol[j], aux0); // update row buffer row2[j] = current4; // update max score score = _mm512_max_epi32(score,current4); } } // update maxRow maxRow[i] = aux1; maxRow[i+1] 
= aux2; maxRow[i+2] = aux3; maxRow[i+3] = aux4; // update lastCol lastCol[i] = auxLastCol; lastCol[i+1] = current1; lastCol[i+2] = current2; lastCol[i+3] = current3; auxLastCol = current4; // swap buffers tmp_row = row1; row1 = row2; row2 = tmp_row; } } // store max value _mm512_store_si512 (ptr_scores+bb1*2+bb2,score); } } } } } _mm_free(row1); _mm_free(row2); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); _mm_free(bIndexes_lo);_mm_free(bIndexes_hi); } _mm_free(queryProfiles); *workTime = dwalltime()-tick; }
macro.c
// // Macro kernel from BLIS framework, implementation from ulmBLAS // // // Packing complete panels from A (i.e. without padding) // static void pack_MRxk(int k, const double *A, int incRowA, int incColA, double *buffer) { int i, j; for (j=0; j<k; ++j) { for (i=0; i<MR; ++i) { buffer[i] = A[i*incRowA]; } buffer += MR; A += incColA; } } // // Packing panels from A with padding if required // static void pack_A(int mc, int kc, const double *A, int incRowA, int incColA, double *buffer) { int mp = mc / MR; int _mr = mc % MR; int i, j; for (i=0; i<mp; ++i) { pack_MRxk(kc, A, incRowA, incColA, buffer); buffer += kc*MR; A += MR*incRowA; } if (_mr>0) { for (j=0; j<kc; ++j) { for (i=0; i<_mr; ++i) { buffer[i] = A[i*incRowA]; } for (i=_mr; i<MR; ++i) { buffer[i] = MAX; } buffer += MR; A += incColA; } } } // // Packing complete panels from B (i.e. without padding) // static void pack_kxNR(int k, const double *B, int incRowB, int incColB, double *buffer) { int i, j; for (i=0; i<k; ++i) { for (j=0; j<NR; ++j) { buffer[j] = B[j*incColB]; } buffer += NR; B += incRowB; } } // // Packing panels from B with padding if required // static void pack_B(int kc, int nc, const double *B, int incRowB, int incColB, double *buffer) { int np = nc / NR; int _nr = nc % NR; int i, j; for (j=0; j<np; ++j) { pack_kxNR(kc, B, incRowB, incColB, buffer); buffer += kc*NR; B += NR*incColB; } if (_nr>0) { for (i=0; i<kc; ++i) { for (j=0; j<_nr; ++j) { buffer[j] = B[j*incColB]; } for (j=_nr; j<NR; ++j) { buffer[j] = MAX; } buffer += NR; B += incRowB; } } } // // Compute Y += alpha*X // static void dgeaxpy(int m, int n, const double *X, int incRowX, int incColX, double *Y, int incRowY, int incColY) { int i, j; for (j=0; j<n; ++j) { for (i=0; i<m; ++i) { // omp atomic here? 
but how with min() and Y on both sides while (Y[i*incRowY+j*incColY] > X[i*incRowX+j*incColX]) Y[i*incRowY+j*incColY] = X[i*incRowX+j*incColX]; } } } // // Compute X *= alpha // // // Macro Kernel for the multiplication of blocks of A and B. We assume that // these blocks were previously packed to buffers _A and _B. // static void dgemm_macro_kernel(int mc, int nc, int kc, double *C, int incRowC, int incColC) { int mp = (mc+MR-1) / MR; int np = (nc+NR-1) / NR; int _mr = mc % MR; int _nr = nc % NR; int mr, nr; int i, j; const double *nextA; const double *nextB; //#pragma omp parallel for default(shared) private(j, i, mr, nr, nextA, nextB) num_threads(8) for (j=0; j<np; ++j) { nr = (j!=np-1 || _nr==0) ? NR : _nr; nextB = &_B[j*kc*NR]; // diagonal blocks could probably run to smaller value for (i=0; i<mp; ++i) { mr = (i!=mp-1 || _mr==0) ? MR : _mr; nextA = &_A[(i+1)*kc*MR]; if (i==mp-1) { nextA = _A; nextB = &_B[(j+1)*kc*NR]; if (j==np-1) { nextB = _B; } } if (mr==MR && nr==NR) { dgemm_micro_kernel(kc, &_A[i*kc*MR], &_B[j*kc*NR], false, //full block &C[i*MR*incRowC+j*NR*incColC], incRowC, incColC, nextA, nextB); } else { // partial block at the edge dgemm_micro_kernel(kc, &_A[i*kc*MR], &_B[j*kc*NR], true, // partial block _C, 1, MR, nextA, nextB); dgeaxpy(mr, nr, _C, 1, MR, &C[i*MR*incRowC+j*NR*incColC], incRowC, incColC); } } } }
wave_para.c
/* Generated by Cython 0.15.1 on Mon Feb 11 15:34:10 2013 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #else #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #if PY_VERSION_HEX < 0x02040000 #define METH_COEXIST 0 #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type) #define PyDict_Contains(d,o) PySequence_Contains(d,o) #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o) #define PyNumber_Index(o) PyNumber_Int(o) #define PyIndex_Check(o) PyNumber_Check(o) #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define 
PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) 
PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x03020000 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__cwave__omp #define __PYX_HAVE_API__cwave__omp #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif /* inline attribute */ #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) 
&& __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif /* unused attribute */ #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || defined(__INTEL_COMPILER) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ /* Type Conversion Predeclarations */ #define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s) #define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s)) #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*); #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... 
*/ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "wave_para.pyx", "numpy.pxd", }; /* "numpy.pxd":719 * # in Cython to enable them only on the right systems. * * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "numpy.pxd":720 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "numpy.pxd":721 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "numpy.pxd":722 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "numpy.pxd":726 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "numpy.pxd":727 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # 
<<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "numpy.pxd":728 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "numpy.pxd":729 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "numpy.pxd":733 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "numpy.pxd":734 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "numpy.pxd":743 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "numpy.pxd":744 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "numpy.pxd":745 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "numpy.pxd":747 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "numpy.pxd":748 * * 
ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "numpy.pxd":749 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "numpy.pxd":751 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "numpy.pxd":752 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "numpy.pxd":754 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "numpy.pxd":755 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "numpy.pxd":756 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "wave_para.pyx":7 * * DTYPE = np.double * ctypedef np.double_t DTYPE_t # <<<<<<<<<<<<<< * @cython.boundscheck(False) * def wave(double t, double t_stop, double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): */ typedef __pyx_t_5numpy_double_t __pyx_t_10cwave__omp_DTYPE_t; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif 
#if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "numpy.pxd":758 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "numpy.pxd":759 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "numpy.pxd":760 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "numpy.pxd":762 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define 
__Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/ static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ /* Run-time type information about structs used with buffers */ struct __Pyx_StructField_; typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */ } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* 
name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static void __Pyx_RaiseBufferFallbackError(void); /*proto*/ static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ #include <string.h> void __pyx_init_nan(void); static float __PYX_NAN; #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/ #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif Py_ssize_t __Pyx_zeros[] = {0, 0}; Py_ssize_t __Pyx_minusones[] = {-1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t); #ifndef __PYX_FORCE_INIT_THREADS #if PY_VERSION_HEX < 0x02040200 #define __PYX_FORCE_INIT_THREADS 1 #else #define __PYX_FORCE_INIT_THREADS 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) 
#define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE 
__pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *); static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *); static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *); static CYTHON_INLINE char 
__Pyx_PyInt_AsChar(PyObject *); static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *); static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *); static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *); static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *); static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *); static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *); static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *); static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *); static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *); static int __Pyx_check_binary_version(void); static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/ static CYTHON_INLINE PyObject 
*__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/ /* Module declarations from 'cython.cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'cwave__omp' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_10cwave__omp_DTYPE_t), 'R' }; #define __Pyx_MODULE_NAME "cwave__omp" int __pyx_module_is_main_cwave__omp = 0; /* Implementation of 'cwave__omp' */ static PyObject *__pyx_builtin_xrange; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static char __pyx_k_1[] = "ndarray is not C contiguous"; static char __pyx_k_3[] = "ndarray is not Fortran contiguous"; static char __pyx_k_5[] = "Non-native byte order not supported"; static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_11[] = "Format string allocated too short."; static char __pyx_k__B[] = "B"; static char __pyx_k__H[] = "H"; static char __pyx_k__I[] = "I"; static char __pyx_k__L[] = "L"; static char __pyx_k__O[] = "O"; static char __pyx_k__Q[] = "Q"; static char __pyx_k__b[] = "b"; static char __pyx_k__d[] = "d"; static char __pyx_k__f[] = "f"; 
static char __pyx_k__g[] = "g"; static char __pyx_k__h[] = "h"; static char __pyx_k__i[] = "i"; static char __pyx_k__k[] = "k"; static char __pyx_k__l[] = "l"; static char __pyx_k__q[] = "q"; static char __pyx_k__t[] = "t"; static char __pyx_k__u[] = "u"; static char __pyx_k__Zd[] = "Zd"; static char __pyx_k__Zf[] = "Zf"; static char __pyx_k__Zg[] = "Zg"; static char __pyx_k__dt[] = "dt"; static char __pyx_k__dx[] = "dx"; static char __pyx_k__dy[] = "dy"; static char __pyx_k__np[] = "np"; static char __pyx_k__um[] = "um"; static char __pyx_k__wave[] = "wave"; static char __pyx_k__DTYPE[] = "DTYPE"; static char __pyx_k__numpy[] = "numpy"; static char __pyx_k__range[] = "range"; static char __pyx_k__zeros[] = "zeros"; static char __pyx_k__double[] = "double"; static char __pyx_k__t_stop[] = "t_stop"; static char __pyx_k__xrange[] = "xrange"; static char __pyx_k____main__[] = "__main__"; static char __pyx_k____test__[] = "__test__"; static char __pyx_k__ValueError[] = "ValueError"; static char __pyx_k__cwave__omp[] = "cwave__omp"; static char __pyx_k__calculate_u[] = "calculate_u"; static char __pyx_k__RuntimeError[] = "RuntimeError"; static PyObject *__pyx_kp_u_1; static PyObject *__pyx_kp_u_11; static PyObject *__pyx_kp_u_3; static PyObject *__pyx_kp_u_5; static PyObject *__pyx_kp_u_7; static PyObject *__pyx_kp_u_8; static PyObject *__pyx_n_s__DTYPE; static PyObject *__pyx_n_s__RuntimeError; static PyObject *__pyx_n_s__ValueError; static PyObject *__pyx_n_s____main__; static PyObject *__pyx_n_s____test__; static PyObject *__pyx_n_s__calculate_u; static PyObject *__pyx_n_s__cwave__omp; static PyObject *__pyx_n_s__double; static PyObject *__pyx_n_s__dt; static PyObject *__pyx_n_s__dx; static PyObject *__pyx_n_s__dy; static PyObject *__pyx_n_s__k; static PyObject *__pyx_n_s__np; static PyObject *__pyx_n_s__numpy; static PyObject *__pyx_n_s__range; static PyObject *__pyx_n_s__t; static PyObject *__pyx_n_s__t_stop; static PyObject *__pyx_n_s__u; static PyObject 
*__pyx_n_s__um; static PyObject *__pyx_n_s__wave; static PyObject *__pyx_n_s__xrange; static PyObject *__pyx_n_s__zeros; static PyObject *__pyx_int_15; static PyObject *__pyx_k_tuple_2; static PyObject *__pyx_k_tuple_4; static PyObject *__pyx_k_tuple_6; static PyObject *__pyx_k_tuple_9; static PyObject *__pyx_k_tuple_10; static PyObject *__pyx_k_tuple_12; /* "wave_para.pyx":9 * ctypedef np.double_t DTYPE_t * @cython.boundscheck(False) * def wave(double t, double t_stop, double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] new_u * while t <= t_stop: */ static PyObject *__pyx_pf_10cwave__omp_wave(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_10cwave__omp_wave = {__Pyx_NAMESTR("wave"), (PyCFunction)__pyx_pf_10cwave__omp_wave, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)}; static PyObject *__pyx_pf_10cwave__omp_wave(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_t; double __pyx_v_t_stop; double __pyx_v_dt; double __pyx_v_dx; double __pyx_v_dy; PyArrayObject *__pyx_v_u = 0; PyArrayObject *__pyx_v_um = 0; PyArrayObject *__pyx_v_k = 0; PyArrayObject *__pyx_v_new_u = 0; Py_buffer __pyx_bstruct_new_u; Py_ssize_t __pyx_bstride_0_new_u = 0; Py_ssize_t __pyx_bstride_1_new_u = 0; Py_ssize_t __pyx_bshape_0_new_u = 0; Py_ssize_t __pyx_bshape_1_new_u = 0; Py_buffer __pyx_bstruct_k; Py_ssize_t __pyx_bstride_0_k = 0; Py_ssize_t __pyx_bstride_1_k = 0; Py_ssize_t __pyx_bshape_0_k = 0; Py_ssize_t __pyx_bshape_1_k = 0; Py_buffer __pyx_bstruct_um; Py_ssize_t __pyx_bstride_0_um = 0; Py_ssize_t __pyx_bstride_1_um = 0; Py_ssize_t __pyx_bshape_0_um = 0; Py_ssize_t __pyx_bshape_1_um = 0; Py_buffer __pyx_bstruct_u; Py_ssize_t __pyx_bstride_0_u = 0; Py_ssize_t __pyx_bstride_1_u = 
0; Py_ssize_t __pyx_bshape_0_u = 0; Py_ssize_t __pyx_bshape_1_u = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__t,&__pyx_n_s__t_stop,&__pyx_n_s__dt,&__pyx_n_s__dx,&__pyx_n_s__dy,&__pyx_n_s__u,&__pyx_n_s__um,&__pyx_n_s__k,0}; __Pyx_RefNannySetupContext("wave"); __pyx_self = __pyx_self; { PyObject* values[8] = {0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t_stop); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dt); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; 
__pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dx); if (likely(values[3])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dy); if (likely(values[4])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__u); if (likely(values[5])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__um); if (likely(values[6])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); if (likely(values[7])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "wave") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 8) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); 
} __pyx_v_t = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_t == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_t_stop = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_t_stop == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_dt = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dt == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_dx = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_dy = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_dy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_u = ((PyArrayObject *)values[5]); __pyx_v_um = ((PyArrayObject *)values[6]); __pyx_v_k = ((PyArrayObject *)values[7]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("wave", 1, 8, 8, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("cwave__omp.wave", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __Pyx_INCREF((PyObject *)__pyx_v_u); __Pyx_INCREF((PyObject *)__pyx_v_um); __pyx_bstruct_new_u.buf = NULL; __pyx_bstruct_u.buf = NULL; __pyx_bstruct_um.buf = NULL; __pyx_bstruct_k.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_u), __pyx_ptype_5numpy_ndarray, 1, "u", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_um), __pyx_ptype_5numpy_ndarray, 1, "um", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_k), __pyx_ptype_5numpy_ndarray, 1, "k", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_u, (PyObject*)__pyx_v_u, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_u = __pyx_bstruct_u.strides[0]; __pyx_bstride_1_u = __pyx_bstruct_u.strides[1]; __pyx_bshape_0_u = __pyx_bstruct_u.shape[0]; __pyx_bshape_1_u = __pyx_bstruct_u.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_um, (PyObject*)__pyx_v_um, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_um = __pyx_bstruct_um.strides[0]; __pyx_bstride_1_um = __pyx_bstruct_um.strides[1]; __pyx_bshape_0_um = __pyx_bstruct_um.shape[0]; __pyx_bshape_1_um = __pyx_bstruct_um.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_k, (PyObject*)__pyx_v_k, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_k = __pyx_bstruct_k.strides[0]; __pyx_bstride_1_k = __pyx_bstruct_k.strides[1]; __pyx_bshape_0_k = __pyx_bstruct_k.shape[0]; __pyx_bshape_1_k = __pyx_bstruct_k.shape[1]; /* "wave_para.pyx":11 * def wave(double t, double t_stop, double dt, 
double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] new_u * while t <= t_stop: # <<<<<<<<<<<<<< * t += dt * new_u = calculate_u(dt, dx, dy, u, um, k) */ while (1) { __pyx_t_1 = (__pyx_v_t <= __pyx_v_t_stop); if (!__pyx_t_1) break; /* "wave_para.pyx":12 * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] new_u * while t <= t_stop: * t += dt # <<<<<<<<<<<<<< * new_u = calculate_u(dt, dx, dy, u, um, k) * um = u */ __pyx_v_t = (__pyx_v_t + __pyx_v_dt); /* "wave_para.pyx":13 * while t <= t_stop: * t += dt * new_u = calculate_u(dt, dx, dy, u, um, k) # <<<<<<<<<<<<<< * um = u * u = new_u */ __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__calculate_u); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyFloat_FromDouble(__pyx_v_dt); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyFloat_FromDouble(__pyx_v_dx); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyFloat_FromDouble(__pyx_v_dy); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_6)); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_INCREF(((PyObject 
*)__pyx_v_u)); PyTuple_SET_ITEM(__pyx_t_6, 3, ((PyObject *)__pyx_v_u)); __Pyx_GIVEREF(((PyObject *)__pyx_v_u)); __Pyx_INCREF(((PyObject *)__pyx_v_um)); PyTuple_SET_ITEM(__pyx_t_6, 4, ((PyObject *)__pyx_v_um)); __Pyx_GIVEREF(((PyObject *)__pyx_v_um)); __Pyx_INCREF(((PyObject *)__pyx_v_k)); PyTuple_SET_ITEM(__pyx_t_6, 5, ((PyObject *)__pyx_v_k)); __Pyx_GIVEREF(((PyObject *)__pyx_v_k)); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_6), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_6)); __pyx_t_6 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_new_u); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_new_u, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_new_u, (PyObject*)__pyx_v_new_u, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_bstride_0_new_u = __pyx_bstruct_new_u.strides[0]; __pyx_bstride_1_new_u = __pyx_bstruct_new_u.strides[1]; __pyx_bshape_0_new_u = __pyx_bstruct_new_u.shape[0]; __pyx_bshape_1_new_u = __pyx_bstruct_new_u.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 13; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_7 = 0; __Pyx_XDECREF(((PyObject *)__pyx_v_new_u)); __pyx_v_new_u = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "wave_para.pyx":14 * t += dt * new_u = calculate_u(dt, dx, dy, u, um, k) * um = u # <<<<<<<<<<<<<< * u = new_u * return u */ { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_um); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_um, (PyObject*)((PyArrayObject *)__pyx_v_u), &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_11, &__pyx_t_10, &__pyx_t_9); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_um, (PyObject*)__pyx_v_um, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_9); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_11, __pyx_t_10, __pyx_t_9); } } __pyx_bstride_0_um = __pyx_bstruct_um.strides[0]; __pyx_bstride_1_um = __pyx_bstruct_um.strides[1]; __pyx_bshape_0_um = __pyx_bstruct_um.shape[0]; __pyx_bshape_1_um = __pyx_bstruct_um.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(((PyObject *)__pyx_v_u)); __Pyx_DECREF(((PyObject *)__pyx_v_um)); __pyx_v_um = ((PyArrayObject *)__pyx_v_u); /* "wave_para.pyx":15 * new_u = calculate_u(dt, dx, dy, u, um, k) * um = u * u = new_u # <<<<<<<<<<<<<< * return u * */ { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_u); __pyx_t_8 = __Pyx_GetBufferAndValidate(&__pyx_bstruct_u, (PyObject*)((PyArrayObject *)__pyx_v_new_u), &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack); if (unlikely(__pyx_t_8 < 0)) { PyErr_Fetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_u, (PyObject*)__pyx_v_u, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_9); Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_9, __pyx_t_10, __pyx_t_11); } } __pyx_bstride_0_u = __pyx_bstruct_u.strides[0]; __pyx_bstride_1_u = __pyx_bstruct_u.strides[1]; __pyx_bshape_0_u = __pyx_bstruct_u.shape[0]; __pyx_bshape_1_u = __pyx_bstruct_u.shape[1]; if (unlikely(__pyx_t_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(((PyObject *)__pyx_v_new_u)); __Pyx_DECREF(((PyObject *)__pyx_v_u)); __pyx_v_u = ((PyArrayObject *)__pyx_v_new_u); } /* "wave_para.pyx":16 * um = u * u = new_u * return u # <<<<<<<<<<<<<< * * @cython.boundscheck(False) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_u)); __pyx_r = ((PyObject *)__pyx_v_u); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_new_u); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_k); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_um); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_u); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("cwave__omp.wave", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_new_u); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_k); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_um); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_u); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_new_u); __Pyx_XDECREF((PyObject *)__pyx_v_u); __Pyx_XDECREF((PyObject *)__pyx_v_um); 
/* NOTE(review): This is Cython-generated C (module "cwave__omp", produced from
 * wave_para.pyx). Do not hand-edit this file -- change wave_para.pyx and re-run
 * Cython instead. The pasted chunk had lost its line breaks; preprocessor
 * directives below have been restored to their own lines (C requires this),
 * and review comments added. All code tokens are unchanged. */
/* Tail of the preceding wave() wrapper (its body begins earlier in the file):
 * hand the result reference back to the caller and close the refnanny scope. */
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "wave_para.pyx":20 * @cython.boundscheck(False) * @cython.cdivision(True) * def calculate_u(double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] up = np.zeros((u.shape[0], u.shape[1])) * cdef int i,j */
/* calculate_u(dt, dx, dy, u, um, k): one explicit finite-difference time step
 * of the 2-D variable-coefficient wave equation over all interior grid points,
 * returning a freshly allocated array `up` (boundaries stay zero from
 * np.zeros). u is the current field, um the previous one, k the coefficient
 * field. The row (i) loop is an OpenMP prange executed with the GIL released;
 * the column (j) loop runs serially inside each thread. */
static PyObject *__pyx_pf_10cwave__omp_1calculate_u(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_10cwave__omp_1calculate_u = {__Pyx_NAMESTR("calculate_u"), (PyCFunction)__pyx_pf_10cwave__omp_1calculate_u, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
static PyObject *__pyx_pf_10cwave__omp_1calculate_u(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* Locals: __pyx_v_* mirror the Python-level variables; __pyx_bstruct_* hold
 * the Py_buffer view of each ndarray, with strides/shapes cached in
 * __pyx_bstride_*/__pyx_bshape_* for the inner loops; __pyx_t_* are
 * compiler temporaries (one per stencil index expression). */
double __pyx_v_dt; double __pyx_v_dx; double __pyx_v_dy; PyArrayObject *__pyx_v_u = 0; PyArrayObject *__pyx_v_um = 0; PyArrayObject *__pyx_v_k = 0; PyArrayObject *__pyx_v_up = 0; int __pyx_v_i; int __pyx_v_j; Py_buffer __pyx_bstruct_k; Py_ssize_t __pyx_bstride_0_k = 0; Py_ssize_t __pyx_bstride_1_k = 0; Py_ssize_t __pyx_bshape_0_k = 0; Py_ssize_t __pyx_bshape_1_k = 0; Py_buffer __pyx_bstruct_up; Py_ssize_t __pyx_bstride_0_up = 0; Py_ssize_t __pyx_bstride_1_up = 0; Py_ssize_t __pyx_bshape_0_up = 0; Py_ssize_t __pyx_bshape_1_up = 0; Py_buffer __pyx_bstruct_um; Py_ssize_t __pyx_bstride_0_um = 0; Py_ssize_t __pyx_bstride_1_um = 0; Py_ssize_t __pyx_bshape_0_um = 0; Py_ssize_t __pyx_bshape_1_um = 0; Py_buffer __pyx_bstruct_u; Py_ssize_t __pyx_bstride_0_u = 0; Py_ssize_t __pyx_bstride_1_u = 0; Py_ssize_t __pyx_bshape_0_u = 0; Py_ssize_t __pyx_bshape_1_u = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyArrayObject *__pyx_t_5 = NULL;
long __pyx_t_6; long __pyx_t_7; long __pyx_t_8; long __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; long __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; long __pyx_t_19; int __pyx_t_20; int __pyx_t_21; int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; long __pyx_t_25; int __pyx_t_26; int __pyx_t_27; int __pyx_t_28; long __pyx_t_29; int __pyx_t_30; int __pyx_t_31; long __pyx_t_32; int __pyx_t_33; int __pyx_t_34; int __pyx_t_35; long __pyx_t_36; int __pyx_t_37; int __pyx_t_38; int __pyx_t_39; int __pyx_t_40; int __pyx_t_41; long __pyx_t_42; int __pyx_t_43; int __pyx_t_44; int __pyx_t_45; long __pyx_t_46; int __pyx_t_47; int __pyx_t_48; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__dt,&__pyx_n_s__dx,&__pyx_n_s__dy,&__pyx_n_s__u,&__pyx_n_s__um,&__pyx_n_s__k,0};
__Pyx_RefNannySetupContext("calculate_u"); __pyx_self = __pyx_self;
/* Argument unpacking: accept exactly six arguments, positionally or by the
 * keywords (dt, dx, dy, u, um, k); anything else raises TypeError via
 * __Pyx_RaiseArgtupleInvalid. The fall-through switch cases are intentional
 * (generated code). */
{ PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (PyTuple_GET_SIZE(__pyx_args)) { case 0: values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dt); if (likely(values[0])) kw_args--; else goto __pyx_L5_argtuple_error; case 1: values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dx); if (likely(values[1])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__dy); if (likely(values[2])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__u); if (likely(values[3])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__um); if (likely(values[4])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__k); if (likely(values[5])) kw_args--; else { __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "calculate_u") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_dt = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_dt == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_dx = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_dy = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_u = ((PyArrayObject *)values[3]); __pyx_v_um = ((PyArrayObject *)values[4]); __pyx_v_k = ((PyArrayObject *)values[5]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("calculate_u", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("cwave__omp.calculate_u", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:;
/* Type-check the three ndarray arguments, then acquire 2-D buffer views of
 * u, um and k, caching their strides and shapes in the locals above. */
__pyx_bstruct_up.buf = NULL; __pyx_bstruct_u.buf = NULL; __pyx_bstruct_um.buf = NULL; __pyx_bstruct_k.buf = NULL; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_u), __pyx_ptype_5numpy_ndarray, 1, "u", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_um), __pyx_ptype_5numpy_ndarray, 1, "um", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_k), __pyx_ptype_5numpy_ndarray, 1, "k", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_u, (PyObject*)__pyx_v_u, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_u = __pyx_bstruct_u.strides[0]; __pyx_bstride_1_u = __pyx_bstruct_u.strides[1]; __pyx_bshape_0_u = __pyx_bstruct_u.shape[0]; __pyx_bshape_1_u = __pyx_bstruct_u.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_um, (PyObject*)__pyx_v_um, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_um = __pyx_bstruct_um.strides[0]; __pyx_bstride_1_um = __pyx_bstruct_um.strides[1]; __pyx_bshape_0_um = __pyx_bstruct_um.shape[0]; __pyx_bshape_1_um = __pyx_bstruct_um.shape[1]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_k, (PyObject*)__pyx_v_k, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_bstride_0_k = __pyx_bstruct_k.strides[0]; __pyx_bstride_1_k = __pyx_bstruct_k.strides[1]; __pyx_bshape_0_k = __pyx_bstruct_k.shape[0]; __pyx_bshape_1_k = __pyx_bstruct_k.shape[1];
/* "wave_para.pyx":21 * @cython.cdivision(True) * def calculate_u(double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] up = np.zeros((u.shape[0], u.shape[1])) # <<<<<<<<<<<<<< * cdef int i,j * with nogil, parallel(): */
/* up = np.zeros((u.shape[0], u.shape[1])): build the shape tuple, call
 * numpy.zeros by name lookup, type-check the result and acquire a WRITABLE
 * buffer view for the stencil writes below. */
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_u->dimensions[0])); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_to_py_Py_intptr_t((__pyx_v_u->dimensions[1])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4)); __Pyx_GIVEREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_5 = ((PyArrayObject *)__pyx_t_4); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_up, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_10cwave__omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) { __pyx_v_up = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_up.buf = NULL; {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else {__pyx_bstride_0_up = __pyx_bstruct_up.strides[0]; __pyx_bstride_1_up = __pyx_bstruct_up.strides[1]; __pyx_bshape_0_up = __pyx_bstruct_up.shape[0]; __pyx_bshape_1_up = __pyx_bstruct_up.shape[1]; } } __pyx_t_5 = 0; __pyx_v_up = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0;
/* "wave_para.pyx":23 * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] up = np.zeros((u.shape[0], u.shape[1])) * cdef int i,j * with nogil, parallel(): # <<<<<<<<<<<<<< * for i in prange(1, u.shape[0]-1): * for j in xrange(1, u.shape[1]-1): */
/* Release the GIL for the compute region, then spawn an OpenMP team.
 * All stencil-index temporaries are thread-private. */
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS /*try:*/ { {
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_16, __pyx_t_21, __pyx_t_24, __pyx_t_42, __pyx_t_41, __pyx_t_10, __pyx_t_6, __pyx_t_34, __pyx_t_29, __pyx_t_28, __pyx_t_31, __pyx_t_48, __pyx_t_37, __pyx_t_14, __pyx_t_22, __pyx_t_25, __pyx_t_36, __pyx_t_30, __pyx_t_13, __pyx_t_19, __pyx_t_44, __pyx_t_33, __pyx_t_43, __pyx_t_39, __pyx_t_8, __pyx_t_18, __pyx_t_23, __pyx_t_26, __pyx_t_7, __pyx_t_12, __pyx_t_17, __pyx_t_20, __pyx_t_46, __pyx_t_45, __pyx_t_40, __pyx_t_11, __pyx_t_47, __pyx_t_38, __pyx_t_35, __pyx_t_32, __pyx_t_27, __pyx_t_15, __pyx_t_9)
#endif /* _OPENMP */
{
/* "wave_para.pyx":24 * cdef int i,j * with nogil, parallel(): * for i in prange(1, u.shape[0]-1): # <<<<<<<<<<<<<< * for j in xrange(1, u.shape[1]-1): * up[i,j] = 2*u[i,j] - um[i,j] + \ */
/* prange(1, u.shape[0]-1): the iteration count is computed once, then the
 * normalized loop is distributed across threads with "omp for";
 * i = 1 + step*iteration reconstructs the Python-level index. */
__pyx_t_6 = ((__pyx_v_u->dimensions[0]) - 1); if (1 == 0) abort(); { __pyx_t_8 = (__pyx_t_6 - 1) / 1; if (__pyx_t_8 > 0) { __pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_j) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){ { __pyx_v_i = 1 + 1 * __pyx_t_7; /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0);
/* "wave_para.pyx":25 * with nogil, parallel(): * for i in prange(1, u.shape[0]-1): * for j in xrange(1, u.shape[1]-1): # <<<<<<<<<<<<<< * up[i,j] = 2*u[i,j] - um[i,j] + \ * (dt/dx)**2*( */
/* Serial inner loop over interior columns. */
__pyx_t_9 = ((__pyx_v_u->dimensions[1]) - 1); for (__pyx_t_10 = 1; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10;
/* "wave_para.pyx":26 * for i in prange(1, u.shape[0]-1): * for j in xrange(1, u.shape[1]-1): * up[i,j] = 2*u[i,j] - um[i,j] + \ # <<<<<<<<<<<<<< * (dt/dx)**2*( * (0.5*(k[i+1,j] + k[i,j])*(u[i+1,j] - u[i,j]) - */
/* Temporaries __pyx_t_11..__pyx_t_48 capture each buffer index of the
 * stencil before the combined assignment below. */
__pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_j; __pyx_t_13 = __pyx_v_i; __pyx_t_14 = __pyx_v_j;
/* "wave_para.pyx":28 * up[i,j] = 2*u[i,j] - um[i,j] + \ * (dt/dx)**2*( * (0.5*(k[i+1,j] + k[i,j])*(u[i+1,j] - u[i,j]) - # <<<<<<<<<<<<<< * 0.5*(k[i,j] + k[i-1,j])*(u[i,j] - u[i-1,j]))) + \ * (dt/dy)**2*( */
__pyx_t_15 = (__pyx_v_i + 1); __pyx_t_16 = __pyx_v_j; __pyx_t_17 = __pyx_v_i; __pyx_t_18 = __pyx_v_j; __pyx_t_19 = (__pyx_v_i + 1); __pyx_t_20 = __pyx_v_j; __pyx_t_21 = __pyx_v_i; __pyx_t_22 = __pyx_v_j;
/* "wave_para.pyx":29 * (dt/dx)**2*( * (0.5*(k[i+1,j] + k[i,j])*(u[i+1,j] - u[i,j]) - * 0.5*(k[i,j] + k[i-1,j])*(u[i,j] - u[i-1,j]))) + \ # <<<<<<<<<<<<<< * (dt/dy)**2*( * (0.5*(k[i,j+1] + k[i,j])*(u[i,j+1] - u[i,j]) - */
__pyx_t_23 = __pyx_v_i; __pyx_t_24 = __pyx_v_j; __pyx_t_25 = (__pyx_v_i - 1); __pyx_t_26 = __pyx_v_j; __pyx_t_27 = __pyx_v_i; __pyx_t_28 = __pyx_v_j; __pyx_t_29 = (__pyx_v_i - 1); __pyx_t_30 = __pyx_v_j;
/* "wave_para.pyx":31 * 0.5*(k[i,j] + k[i-1,j])*(u[i,j] - u[i-1,j]))) + \ * (dt/dy)**2*( * (0.5*(k[i,j+1] + k[i,j])*(u[i,j+1] - u[i,j]) - # <<<<<<<<<<<<<< * 0.5*(k[i,j] + k[i,j-1])*(u[i,j] - u[i,j-1]))) * return up */
__pyx_t_31 = __pyx_v_i; __pyx_t_32 = (__pyx_v_j + 1); __pyx_t_33 = __pyx_v_i; __pyx_t_34 = __pyx_v_j; __pyx_t_35 = __pyx_v_i; __pyx_t_36 = (__pyx_v_j + 1); __pyx_t_37 = __pyx_v_i; __pyx_t_38 = __pyx_v_j;
/* "wave_para.pyx":32 * (dt/dy)**2*( * (0.5*(k[i,j+1] + k[i,j])*(u[i,j+1] - u[i,j]) - * 0.5*(k[i,j] + k[i,j-1])*(u[i,j] - u[i,j-1]))) # <<<<<<<<<<<<<< * return up * */
__pyx_t_39 = __pyx_v_i; __pyx_t_40 = __pyx_v_j; __pyx_t_41 = __pyx_v_i; __pyx_t_42 = (__pyx_v_j - 1); __pyx_t_43 = __pyx_v_i; __pyx_t_44 = __pyx_v_j; __pyx_t_45 = __pyx_v_i; __pyx_t_46 = (__pyx_v_j - 1);
/* "wave_para.pyx":26 * for i in prange(1, u.shape[0]-1): * for j in xrange(1, u.shape[1]-1): * up[i,j] = 2*u[i,j] - um[i,j] + \ # <<<<<<<<<<<<<< * (dt/dx)**2*( * (0.5*(k[i+1,j] + k[i,j])*(u[i+1,j] - u[i,j]) - */
/* The five-point variable-coefficient update written out on raw strided
 * buffer pointers; reads u/um/k, writes only up[i,j] (no cross-thread
 * writes -- each thread owns distinct rows i). */
__pyx_t_47 = __pyx_v_i; __pyx_t_48 = __pyx_v_j; *__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_up.buf, __pyx_t_47, __pyx_bstride_0_up, __pyx_t_48, __pyx_bstride_1_up) = ((((2.0 * (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_11, __pyx_bstride_0_u, __pyx_t_12, __pyx_bstride_1_u))) - (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_um.buf, __pyx_t_13, __pyx_bstride_0_um, __pyx_t_14, __pyx_bstride_1_um))) + (pow((__pyx_v_dt / __pyx_v_dx), 2.0) * (((0.5 * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_15, __pyx_bstride_0_k, __pyx_t_16, __pyx_bstride_1_k)) + (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_17, __pyx_bstride_0_k, __pyx_t_18, __pyx_bstride_1_k)))) * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_19, __pyx_bstride_0_u, __pyx_t_20, __pyx_bstride_1_u)) - (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_21, __pyx_bstride_0_u, __pyx_t_22, __pyx_bstride_1_u)))) - ((0.5 * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_23, __pyx_bstride_0_k, __pyx_t_24, __pyx_bstride_1_k)) + (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_25, __pyx_bstride_0_k, __pyx_t_26, __pyx_bstride_1_k)))) * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_27, __pyx_bstride_0_u, __pyx_t_28, __pyx_bstride_1_u)) - (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_29, __pyx_bstride_0_u, __pyx_t_30, __pyx_bstride_1_u))))))) + (pow((__pyx_v_dt / __pyx_v_dy), 2.0) * (((0.5 * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_31, __pyx_bstride_0_k, __pyx_t_32, __pyx_bstride_1_k)) + (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_33, __pyx_bstride_0_k, __pyx_t_34, __pyx_bstride_1_k)))) * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_35, __pyx_bstride_0_u, __pyx_t_36, __pyx_bstride_1_u)) - (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_37, __pyx_bstride_0_u, __pyx_t_38, __pyx_bstride_1_u)))) - ((0.5 * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_39, __pyx_bstride_0_k, __pyx_t_40, __pyx_bstride_1_k)) + (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_k.buf, __pyx_t_41, __pyx_bstride_0_k, __pyx_t_42, __pyx_bstride_1_k)))) * ((*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_43, __pyx_bstride_0_u, __pyx_t_44, __pyx_bstride_1_u)) - (*__Pyx_BufPtrStrided2d(__pyx_t_10cwave__omp_DTYPE_t *, __pyx_bstruct_u.buf, __pyx_t_45, __pyx_bstride_0_u, __pyx_t_46, __pyx_bstride_1_u))))))); } } } } } } } }
/* "wave_para.pyx":23 * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] up = np.zeros((u.shape[0], u.shape[1])) * cdef int i,j * with nogil, parallel(): # <<<<<<<<<<<<<< * for i in prange(1, u.shape[0]-1): * for j in xrange(1, u.shape[1]-1): */
/* End of the nogil region: reacquire the GIL. */
/*finally:*/ { Py_BLOCK_THREADS } }
/* "wave_para.pyx":33 * (0.5*(k[i,j+1] + k[i,j])*(u[i,j+1] - u[i,j]) - * 0.5*(k[i,j] + k[i,j-1])*(u[i,j] - u[i,j-1]))) * return up # <<<<<<<<<<<<<< * */
/* return up; the second "__pyx_r = Py_None" sequence is generated,
 * unreachable fall-through code. On error (__pyx_L1_error) all buffer views
 * are released with the pending exception saved/restored around the release. */
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_up)); __pyx_r = ((PyObject *)__pyx_v_up); goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_k); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_up); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_um); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_u); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("cwave__omp.calculate_u", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_bstruct_k); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_up); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_um); __Pyx_SafeReleaseBuffer(&__pyx_bstruct_u); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_up); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Opening half of the next echo comment; its closing "*" "/" is on the
   following original line, which is outside this edit and completes it. */
/* "numpy.pxd":190 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__"); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "numpy.pxd":196 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = (__pyx_v_info == NULL); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":199 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "numpy.pxd":200 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":202 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":204 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * 
copy_shape = 1 * else: */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":205 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L6; } /*else*/ { /* "numpy.pxd":207 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L6:; /* "numpy.pxd":209 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS); if (__pyx_t_1) { /* "numpy.pxd":210 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS)); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "numpy.pxd":211 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L7; } __pyx_L7:; /* "numpy.pxd":213 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS 
== pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS); if (__pyx_t_3) { /* "numpy.pxd":214 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS)); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "numpy.pxd":215 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; /* "numpy.pxd":217 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":218 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "numpy.pxd":219 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ if (__pyx_v_copy_shape) { /* "numpy.pxd":222 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "numpy.pxd":223 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "numpy.pxd":224 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "numpy.pxd":225 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); /* "numpy.pxd":226 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]); } goto __pyx_L9; } /*else*/ { /* "numpy.pxd":228 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = 
((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))); /* "numpy.pxd":229 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self))); } __pyx_L9:; /* "numpy.pxd":230 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "numpy.pxd":231 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self)); /* "numpy.pxd":232 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self))); /* "numpy.pxd":235 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "numpy.pxd":236 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr)); __pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr; /* "numpy.pxd":240 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "numpy.pxd":242 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = 
(!__pyx_v_hasfields); if (__pyx_t_2) { __pyx_t_3 = (!__pyx_v_copy_shape); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":244 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L12; } /*else*/ { /* "numpy.pxd":247 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(__pyx_v_self); __Pyx_GIVEREF(__pyx_v_self); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = __pyx_v_self; } __pyx_L12:; /* "numpy.pxd":249 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or */ __pyx_t_1 = (!__pyx_v_hasfields); if (__pyx_t_1) { /* "numpy.pxd":250 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): */ __pyx_v_t = __pyx_v_descr->type_num; /* "numpy.pxd":251 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = (__pyx_v_descr->byteorder == '>'); if (__pyx_t_1) { __pyx_t_2 = __pyx_v_little_endian; } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "numpy.pxd":252 * t = descr.type_num * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = (__pyx_v_descr->byteorder == '<'); if (__pyx_t_1) { __pyx_t_3 = (!__pyx_v_little_endian); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } 
__pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "numpy.pxd":253 * if ((descr.byteorder == '>' and little_endian) or * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L14; } __pyx_L14:; /* "numpy.pxd":254 * (descr.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ __pyx_t_1 = (__pyx_v_t == NPY_BYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__b; goto __pyx_L15; } /* "numpy.pxd":255 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_t_1 = (__pyx_v_t == NPY_UBYTE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__B; goto __pyx_L15; } /* "numpy.pxd":256 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_t_1 = (__pyx_v_t == NPY_SHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__h; goto __pyx_L15; } /* "numpy.pxd":257 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_t_1 = (__pyx_v_t == NPY_USHORT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__H; goto 
__pyx_L15; } /* "numpy.pxd":258 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_t_1 = (__pyx_v_t == NPY_INT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__i; goto __pyx_L15; } /* "numpy.pxd":259 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_t_1 = (__pyx_v_t == NPY_UINT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__I; goto __pyx_L15; } /* "numpy.pxd":260 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_t_1 = (__pyx_v_t == NPY_LONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__l; goto __pyx_L15; } /* "numpy.pxd":261 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__L; goto __pyx_L15; } /* "numpy.pxd":262 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__q; goto __pyx_L15; } /* "numpy.pxd":263 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Q; goto __pyx_L15; } /* "numpy.pxd":264 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_t_1 = 
(__pyx_v_t == NPY_FLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__f; goto __pyx_L15; } /* "numpy.pxd":265 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_t_1 = (__pyx_v_t == NPY_DOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__d; goto __pyx_L15; } /* "numpy.pxd":266 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__g; goto __pyx_L15; } /* "numpy.pxd":267 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_t_1 = (__pyx_v_t == NPY_CFLOAT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zf; goto __pyx_L15; } /* "numpy.pxd":268 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zd; goto __pyx_L15; } /* "numpy.pxd":269 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE); if (__pyx_t_1) { __pyx_v_f = __pyx_k__Zg; goto __pyx_L15; } /* "numpy.pxd":270 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_1 = (__pyx_v_t == NPY_OBJECT); if (__pyx_t_1) { __pyx_v_f = __pyx_k__O; goto __pyx_L15; } /*else*/ { /* "numpy.pxd":272 * elif t == NPY_OBJECT: f = "O" * else: * raise 
ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_8)); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_4)); PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8)); __Pyx_GIVEREF(((PyObject *)__pyx_t_8)); __pyx_t_8 = 0; __pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L15:; /* "numpy.pxd":273 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "numpy.pxd":274 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; goto __pyx_L13; } /*else*/ { /* "numpy.pxd":276 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = '^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = 
((char *)malloc(255)); /* "numpy.pxd":277 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "numpy.pxd":278 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = '^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "numpy.pxd":281 * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, * &offset) # <<<<<<<<<<<<<< * f[0] = 0 # Terminate format string * */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "numpy.pxd":282 * info.format + _buffer_format_string_len, * &offset) * f[0] = 0 # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = 0; } __pyx_L13:; __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":284 * f[0] = 0 # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* 
info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__"); /* "numpy.pxd":285 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self)); if (__pyx_t_1) { /* "numpy.pxd":286 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":287 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t))); if (__pyx_t_1) { /* "numpy.pxd":288 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L6; } __pyx_L6:; __Pyx_RefNannyFinishContext(); } /* "numpy.pxd":764 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1"); /* "numpy.pxd":765 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":767 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2"); /* "numpy.pxd":768 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":770 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3"); /* "numpy.pxd":771 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":773 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject 
*__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4"); /* "numpy.pxd":774 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":776 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5"); /* "numpy.pxd":777 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "numpy.pxd":779 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring"); /* "numpy.pxd":786 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "numpy.pxd":787 * cdef 
tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "numpy.pxd":790 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; __Pyx_XDECREF(__pyx_v_childname); __pyx_v_childname = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":791 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_fields)); __pyx_v_fields = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "numpy.pxd":792 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - (new_offset - offset[0]) < 15: */ if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) { PyObject* sequence = ((PyObject *)__pyx_v_fields); if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) { if 
(PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2); else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); } else { __Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF(((PyObject *)__pyx_v_child)); __pyx_v_child = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_v_new_offset); __pyx_v_new_offset = __pyx_t_4; __pyx_t_4 = 0; /* "numpy.pxd":794 * child, new_offset = fields * * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { /* "numpy.pxd":795 * * if (end - f) - (new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == '>' and little_endian) or */ __pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; /* "numpy.pxd":797 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = (__pyx_v_child->byteorder == '>'); if (__pyx_t_6) { __pyx_t_7 = __pyx_v_little_endian; } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "numpy.pxd":798 * * if ((child.byteorder == '>' and little_endian) or * (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = (__pyx_v_child->byteorder == '<'); if (__pyx_t_6) { 
__pyx_t_8 = (!__pyx_v_little_endian); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "numpy.pxd":799 * if ((child.byteorder == '>' and little_endian) or * (child.byteorder == '<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "numpy.pxd":809 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!__pyx_t_6) break; /* "numpy.pxd":810 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "numpy.pxd":811 * while offset[0] < 
new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "numpy.pxd":812 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "numpy.pxd":814 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "numpy.pxd":816 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child)); if (__pyx_t_6) { /* "numpy.pxd":817 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF(__pyx_v_t); __pyx_v_t = __pyx_t_3; __pyx_t_3 = 0; /* "numpy.pxd":818 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5); if (__pyx_t_6) { /* "numpy.pxd":819 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = 
__pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L10; } __pyx_L10:; /* "numpy.pxd":822 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "numpy.pxd":823 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* 
"numpy.pxd":824 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "numpy.pxd":825 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "numpy.pxd":826 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 
105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* "numpy.pxd":827 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "numpy.pxd":828 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_3 = 
PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "numpy.pxd":829 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "numpy.pxd":830 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "numpy.pxd":831 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "numpy.pxd":832 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "numpy.pxd":833 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "numpy.pxd":834 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "numpy.pxd":835 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":836 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":837 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "numpy.pxd":838 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "numpy.pxd":840 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_5)); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_3)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5)); __Pyx_GIVEREF(((PyObject *)__pyx_t_5)); __pyx_t_5 = 0; __pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "numpy.pxd":841 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores 
struct boundary information ("T{...}"), */
/* NOTE(review): this file is Cython auto-generated C for the extension
 * module "cwave__omp".  Do not hand-edit -- regenerate from the .pyx
 * source.  The quoted "numpy.pxd":NNN comments map each statement back
 * to the Cython source line it was generated from. */
/* "numpy.pxd":841: f += 1  -- advance the format pointer past the code
 * character(s) just written for this child dtype */
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L9;
}
/*else*/ {
  /* "numpy.pxd":845: f = _util_dtypestring(child, f, end, offset)
   * Structured child dtype: recurse.  Cython ignores struct boundary
   * information ("T{...}"), so the child's codes are emitted inline. */
  __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_f = __pyx_t_11;
}
__pyx_L9:;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "numpy.pxd":846: return f */
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* unreachable fall-through return emitted by Cython */
__pyx_r = 0;
goto __pyx_L0;
/* error path: release all temporaries, record the traceback, return NULL */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "numpy.pxd":961: cdef inline void set_array_base(ndarray arr, object base)
 * Installs `base` as the object that keeps `arr`'s data alive: takes a
 * new reference to `base` (or NULL when base is None), drops the old
 * arr->base, then stores the new pointer.  The INCREF deliberately
 * happens before the XDECREF so the sequence is safe even when the new
 * base is the object currently stored in arr->base. */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  PyObject *__pyx_v_baseptr;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("set_array_base");
  /* "numpy.pxd":963: if base is None */
  __pyx_t_1 = (__pyx_v_base == Py_None);
  if (__pyx_t_1) {
    /* "numpy.pxd":964: baseptr = NULL  -- None means "no base object" */
    __pyx_v_baseptr = NULL;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "numpy.pxd":966: Py_INCREF(base)  # important to do this before decref below! */
    Py_INCREF(__pyx_v_base);
    /* "numpy.pxd":967: baseptr = <PyObject*>base */
    __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
  }
  __pyx_L3:;
  /* "numpy.pxd":968: Py_XDECREF(arr.base)  -- release the previous base, if any */
  Py_XDECREF(__pyx_v_arr->base);
  /* "numpy.pxd":969: arr.base = baseptr */
  __pyx_v_arr->base = __pyx_v_baseptr;
  __Pyx_RefNannyFinishContext();
}

/* "numpy.pxd":971: cdef inline object get_array_base(ndarray arr)
 * Returns a new reference to arr->base, or None when the array owns its
 * own data (arr->base == NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("get_array_base");
  /* "numpy.pxd":972: if arr.base is NULL */
  __pyx_t_1 = (__pyx_v_arr->base == NULL);
  if (__pyx_t_1) {
    /* "numpy.pxd":973: return None */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
    goto __pyx_L0;
    goto __pyx_L3; /* unreachable; emitted by Cython */
  }
  /*else*/ {
    /* "numpy.pxd":975: return <object>arr.base  -- new reference */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
    __pyx_r = ((PyObject *)__pyx_v_arr->base);
    goto __pyx_L0;
  }
  __pyx_L3:;
  /* implicit "return None" epilogue for the inline object function */
  __pyx_r =
Py_None;
__Pyx_INCREF(Py_None);
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* Module method table.  Empty at the C level: the module's callables
 * (e.g. calculate_u) are installed during module init, not listed here. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};

#if PY_MAJOR_VERSION >= 3
/* Python 3 module definition for "cwave__omp" (single-phase init,
 * m_size == -1: module keeps state in globals). */
static struct PyModuleDef __pyx_moduledef = {
  PyModuleDef_HEAD_INIT,
  __Pyx_NAMESTR("cwave__omp"),
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif

/* Interned-string table: every identifier / literal the module needs is
 * created once at init time by __Pyx_InitStrings.  NOTE(review): the
 * trailing integer fields appear to be encoding/intern flags per
 * __Pyx_StringTabEntry -- confirm against this Cython version's runtime
 * before relying on their meaning. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0},
  {&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0},
  {&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0},
  {&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0},
  {&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0},
  {&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0},
  {&__pyx_n_s__DTYPE, __pyx_k__DTYPE, sizeof(__pyx_k__DTYPE), 0, 0, 1, 1},
  {&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
  {&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
  {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
  {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
  {&__pyx_n_s__calculate_u, __pyx_k__calculate_u, sizeof(__pyx_k__calculate_u), 0, 0, 1, 1},
  {&__pyx_n_s__cwave__omp, __pyx_k__cwave__omp, sizeof(__pyx_k__cwave__omp), 0, 0, 1, 1},
  {&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1},
  {&__pyx_n_s__dt, __pyx_k__dt, sizeof(__pyx_k__dt), 0, 0, 1, 1},
  {&__pyx_n_s__dx, __pyx_k__dx, sizeof(__pyx_k__dx), 0, 0, 1, 1},
  {&__pyx_n_s__dy, __pyx_k__dy, sizeof(__pyx_k__dy), 0, 0, 1, 1},
  {&__pyx_n_s__k, __pyx_k__k, sizeof(__pyx_k__k), 0, 0, 1, 1},
  {&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1},
  {&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1},
  {&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1},
{&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1},
  {&__pyx_n_s__t_stop, __pyx_k__t_stop, sizeof(__pyx_k__t_stop), 0, 0, 1, 1},
  {&__pyx_n_s__u, __pyx_k__u, sizeof(__pyx_k__u), 0, 0, 1, 1},
  {&__pyx_n_s__um, __pyx_k__um, sizeof(__pyx_k__um), 0, 0, 1, 1},
  {&__pyx_n_s__wave, __pyx_k__wave, sizeof(__pyx_k__wave), 0, 0, 1, 1},
  {&__pyx_n_s__xrange, __pyx_k__xrange, sizeof(__pyx_k__xrange), 0, 0, 1, 1},
  {&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0} /* sentinel */
};

/* Caches references to the builtins this module calls (xrange/range,
 * ValueError, RuntimeError) so run-time lookups are plain pointer reads.
 * On Python 3 the name "xrange" is resolved to builtin range. */
static int __Pyx_InitCachedBuiltins(void) {
#if PY_MAJOR_VERSION >= 3
  __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
  __pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
  __pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  return 0;
  __pyx_L1_error:;
  return -1;
}

/* Pre-builds the constant 1-tuples used as exception arguments by the
 * numpy buffer-protocol helpers, so raising them does not allocate at
 * run time.  Each block: make tuple, insert the interned message,
 * transfer ownership (GOTREF/GIVEREF are refnanny bookkeeping, no-ops
 * in release builds). */
static int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
  /* "numpy.pxd":211: raise ValueError(u"ndarray is not C contiguous") */
  __pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_1));
  PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2));
  /* "numpy.pxd":215: raise ValueError(u"ndarray is not Fortran contiguous") */
  __pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_3));
  PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4));
  /* "numpy.pxd":253: raise ValueError(u"Non-native byte order not supported") */
  __pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
  PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
  /* "numpy.pxd":795: raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") */
  __pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_9));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_8));
  PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9));
  /* "numpy.pxd":799: raise ValueError(u"Non-native byte order not supported")
   * (shares the interned message __pyx_kp_u_5 with tuple_6) */
  __pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
  PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10));
  /* "numpy.pxd":819: raise RuntimeError(u"Format string allocated too short.") */
  __pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12));
  __Pyx_INCREF(((PyObject *)__pyx_kp_u_11));
  PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11));
  __Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11));
  __Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12));
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}

static int __Pyx_InitGlobals(void) { /* Initialize NaN.
The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. */ memset(&__PYX_NAN, 0xFF, sizeof(__PYX_NAN)); PyEval_InitThreads(); if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initcwave__omp(void); /*proto*/ PyMODINIT_FUNC initcwave__omp(void) #else PyMODINIT_FUNC PyInit_cwave__omp(void); /*proto*/ PyMODINIT_FUNC PyInit_cwave__omp(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_cwave__omp(void)"); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __pyx_binding_PyCFunctionType_USED if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && 
__PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("cwave__omp"), __pyx_methods, 0, 0, PYTHON_API_VERSION); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; #if PY_MAJOR_VERSION < 3 Py_INCREF(__pyx_m); #endif __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_module_is_main_cwave__omp) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = 
__Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "wave_para.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "wave_para.pyx":6 * from cython.parallel import parallel, prange * * DTYPE = np.double # <<<<<<<<<<<<<< * ctypedef np.double_t DTYPE_t * @cython.boundscheck(False) */ __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__double); if (unlikely(!__pyx_t_2)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "wave_para.pyx":9 * ctypedef np.double_t DTYPE_t * @cython.boundscheck(False) * def wave(double t, double t_stop, double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] new_u * while t <= t_stop: */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10cwave__omp_wave, NULL, __pyx_n_s__cwave__omp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__wave, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "wave_para.pyx":20 * @cython.boundscheck(False) * @cython.cdivision(True) * def calculate_u(double dt, double dx, double dy, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] u, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] um, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] k): # <<<<<<<<<<<<<< * cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] up = np.zeros((u.shape[0], u.shape[1])) * cdef int i,j */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10cwave__omp_1calculate_u, NULL, __pyx_n_s__cwave__omp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyObject_SetAttr(__pyx_m, __pyx_n_s__calculate_u, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "wave_para.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(((PyObject *)__pyx_t_2)); if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0; /* "numpy.pxd":971 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init cwave__omp", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init cwave__omp"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { PyObject *result; result = PyObject_GetAttr(dict, name); if (!result) { if (dict != __pyx_b) { PyErr_Clear(); result = PyObject_GetAttr(__pyx_b, name); } if (!result) { PyErr_SetObject(PyExc_NameError, name); } } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, 
Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AS_STRING(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; } else { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) { #else if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) { #endif goto invalid_keyword_type; } else { for (name = first_kw_arg; *name; name++) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) break; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) break; #endif } if (*name) { values[name-argnames] = value; } else { /* unexpected keyword found */ for (name=argnames; name != first_kw_arg; name++) { if (**name == key) goto arg_passed_twice; #if PY_MAJOR_VERSION >= 3 if 
(PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) && PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice; #else if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) && _PyString_Eq(**name, key)) goto arg_passed_twice; #endif } if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } } } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, **name); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%s() got an unexpected keyword argument '%s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (!type) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (Py_TYPE(obj) == type) return 1; } else { if (PyObject_TypeCheck(obj, type)) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%s' has incorrect type (expected %s, got %s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; int is_complex; char enc_type; char new_packmode; char enc_packmode; } __Pyx_BufFmt_Context; static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; 
ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'b': return "'char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - 
sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset; if (ctx->enc_type == 0) return 0; group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 
0) ctx->fmt_offset += align_at - align_mod_offset; } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { /* special case -- treat as struct rather than complex number */ size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } __Pyx_BufFmt_RaiseExpected(ctx); return -1; } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; --ctx->enc_count; /* Consume from buffer string */ /* Done checking, move to next field, pushing or popping struct stack if needed */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if 
(!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; } break; case '}': /* end of substruct; either repeat or move on */ ++ts; return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { /* Continue pooling same type */ ctx->enc_count += ctx->new_count; } else { /* New type */ if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; default: { int number = __Pyx_BufFmt_ParseNumber(&ts); if (number == -1) { /* First char was not 
a digit */ PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", *ts); return NULL; } ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_Format(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static void __Pyx_RaiseBufferFallbackError(void) { PyErr_Format(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { /* cause is unused */ Py_XINCREF(type); Py_XINCREF(value); Py_XINCREF(tb); /* First, check the traceback argument, replacing None with NULL. 
*/ if (tb == Py_None) { Py_DECREF(tb); tb = 0; } else if (tb != NULL && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } /* Next, replace a missing value with None */ if (value == NULL) { value = Py_None; Py_INCREF(value); } #if PY_VERSION_HEX < 0x02050000 if (!PyClass_Check(type)) #else if (!PyType_Check(type)) #endif { /* Raising an instance. The value should be a dummy. */ if (value != Py_None) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } /* Normalize to raise <class>, <instance> */ Py_DECREF(value); value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (!PyExceptionClass_Check(type)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject 
*fixed_cause; if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } if (!value) { value = PyObject_CallObject(type, NULL); } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: return; } #endif static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack", index, (index == 1) ? "" : "s"); } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { if (t == Py_None) { __Pyx_RaiseNoneNotIterableError(); } else if (PyTuple_GET_SIZE(t) < index) { __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); } else { __Pyx_RaiseTooManyValuesError(index); } } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags); else { PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject* obj = 
view->obj; if (obj) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;} #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view); Py_DECREF(obj); view->obj = NULL; } } #endif static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) { PyObject *py_import = 0; PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; py_import = __Pyx_GetAttrString(__pyx_b, "__import__"); if (!py_import) goto bad; if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: Py_XDECREF(empty_list); Py_XDECREF(py_import); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) { const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0; const int is_unsigned = const_zero < neg_one; if ((sizeof(Py_intptr_t) == sizeof(char)) || (sizeof(Py_intptr_t) == sizeof(short))) { return PyInt_FromLong((long)val); } else if ((sizeof(Py_intptr_t) == sizeof(int)) || (sizeof(Py_intptr_t) == sizeof(long))) { if (is_unsigned) return PyLong_FromUnsignedLong((unsigned long)val); else return PyInt_FromLong((long)val); } else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) { if (is_unsigned) return 
PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val); else return PyLong_FromLongLong((PY_LONG_LONG)val); } else { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t), little, !is_unsigned); } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { 
__pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return 
z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double 
denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) { const unsigned char neg_one = (unsigned char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned char" : "value too large to convert to unsigned char"); } return (unsigned char)-1; } return (unsigned char)val; } return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) { const unsigned short neg_one = (unsigned short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to unsigned short" : "value too large to convert to unsigned short"); } return (unsigned short)-1; } return (unsigned short)val; } return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(unsigned int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(unsigned int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to unsigned int" : "value too large to convert to unsigned int"); } return (unsigned int)-1; } return (unsigned int)val; } return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x); } static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) { const char neg_one = (char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to char" : "value too large to convert to char"); } return (char)-1; } return (char)val; } return (char)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) { const short neg_one = (short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to short" : "value too large to convert to short"); } return (short)-1; } return (short)val; } return (short)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) { const signed char neg_one = (signed char)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed char) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed char)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed char" : "value too large to convert to signed char"); } return (signed char)-1; } return (signed char)val; } return (signed char)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) { const signed short neg_one = (signed short)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed short) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed short)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to signed short" : "value too large to convert to signed short"); } return (signed short)-1; } return (signed short)val; } return (signed short)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) { const signed int neg_one = (signed int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(signed int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(signed int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? "can't convert negative value to signed int" : "value too large to convert to signed int"); } return (signed int)-1; } return (signed int)val; } return (signed int)__Pyx_PyInt_AsSignedLong(x); } static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (sizeof(int) < sizeof(long)) { long val = __Pyx_PyInt_AsLong(x); if (unlikely(val != (long)(int)val)) { if (!unlikely(val == -1 && PyErr_Occurred())) { PyErr_SetString(PyExc_OverflowError, (is_unsigned && unlikely(val < 0)) ? 
"can't convert negative value to int" : "value too large to convert to int"); } return (int)-1; } return (int)val; } return (int)__Pyx_PyInt_AsLong(x); } static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) { const unsigned long neg_one = (unsigned long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned long"); return (unsigned long)-1; } return (unsigned long)PyLong_AsUnsignedLong(x); } else { return (unsigned long)PyLong_AsLong(x); } } else { unsigned long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned long)-1; val = __Pyx_PyInt_AsUnsignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) { const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned PY_LONG_LONG"); return (unsigned PY_LONG_LONG)-1; } return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x); } } else { unsigned PY_LONG_LONG val; 
PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned PY_LONG_LONG)-1; val = __Pyx_PyInt_AsUnsignedLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } return (long)PyLong_AsUnsignedLong(x); } else { return (long)PyLong_AsLong(x); } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_AsLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) { const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to PY_LONG_LONG"); return (PY_LONG_LONG)-1; } return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (PY_LONG_LONG)PyLong_AsLongLong(x); } } else { PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; val = __Pyx_PyInt_AsLongLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) { const signed 
long neg_one = (signed long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed long"); return (signed long)-1; } return (signed long)PyLong_AsUnsignedLong(x); } else { return (signed long)PyLong_AsLong(x); } } else { signed long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed long)-1; val = __Pyx_PyInt_AsSignedLong(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) { const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_VERSION_HEX < 0x03000000 if (likely(PyInt_Check(x))) { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)val; } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to signed PY_LONG_LONG"); return (signed PY_LONG_LONG)-1; } return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x); } else { return (signed PY_LONG_LONG)PyLong_AsLongLong(x); } } else { signed PY_LONG_LONG val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (signed PY_LONG_LONG)-1; val = __Pyx_PyInt_AsSignedLongLong(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, 
PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; #if PY_MAJOR_VERSION < 3 py_name = PyString_FromString(class_name); #else py_name = PyUnicode_FromString(class_name); #endif if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", module_name, class_name); goto bad; } if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) { PyErr_Format(PyExc_ValueError, "%s.%s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject 
*py_module = 0; #if PY_MAJOR_VERSION < 3 py_name = PyString_FromString(name); #else py_name = PyUnicode_FromString(name); #endif if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #include "compile.h" #include "frameobject.h" #include "traceback.h" static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno, int __pyx_lineno, const char *__pyx_filename) { PyObject *py_srcfile = 0; PyObject *py_funcname = 0; PyObject *py_globals = 0; PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(__pyx_filename); #else py_srcfile = PyUnicode_FromString(__pyx_filename); #endif if (!py_srcfile) goto bad; if (__pyx_clineno) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_code = PyCode_New( 0, /*int argcount,*/ #if PY_MAJOR_VERSION >= 3 0, /*int kwonlyargcount,*/ #endif 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ __pyx_lineno, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); if (!py_code) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if 
(!py_frame) goto bad; py_frame->f_lineno = __pyx_lineno; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } /* Type Conversion Functions */ static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_VERSION_HEX < 0x03000000 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_VERSION_HEX < 0x03000000 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%s__ returned non-%s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return 
res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject* x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) { unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x); if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) { return (size_t)-1; } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) { PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t)-1; } return (size_t)val; } #endif /* Py_PYTHON_H */
PmlSpectralTimeStraggered.h
#pragma once #include "Grid.h" #include "FieldSolver.h" #include "Pml.h" #include "Constants.h" namespace pfc { template<GridTypes gridTypes> class PmlSpectralTimeStraggered : public PmlSpectral<gridTypes> { public: PmlSpectralTimeStraggered(SpectralFieldSolver<gridTypes>* solver, Int3 sizePML) : PmlSpectral<gridTypes>((SpectralFieldSolver<gridTypes>*)solver, sizePML), tmpFieldReal(solver->grid->numCells), tmpFieldComplex(FourierTransform::getSizeOfComplex(solver->grid->numCells)) {} virtual void updateBxSplit(); virtual void updateBySplit(); virtual void updateBzSplit(); virtual void updateExSplit(); virtual void updateEySplit(); virtual void updateEzSplit(); virtual void updateBSplit(); virtual void updateESplit(); virtual void doFirstStep(); virtual void computeTmpField(MemberOfFP3 coordK, ScalarField<complexFP>& field, double dt) {}; ScalarField<complexFP> tmpFieldComplex; ScalarField<FP> tmpFieldReal; protected: SpectralFieldSolver<gridTypes>* getFieldSolver() { return (SpectralFieldSolver<gridTypes>*)this->fieldSolver; } void updateFieldSplit(std::vector<FP>& field, double sign); }; template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateFieldSplit(std::vector<FP>& field, double sign) { #pragma omp parallel for for (int idx = 0; idx < this->numCells; ++idx) { int i = this->cellIndex[idx].x; int j = this->cellIndex[idx].y; int k = this->cellIndex[idx].z; field[idx] += sign * tmpFieldReal(i, j, k); } } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateBxSplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::y, fs->complexGrid->Ez, fs->grid->dt * 0.5); updateFieldSplit(this->byx, -1); computeTmpField(&FP3::z, fs->complexGrid->Ey, fs->grid->dt * 0.5); updateFieldSplit(this->bzx, +1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateBySplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::z, 
fs->complexGrid->Ex, fs->grid->dt * 0.5); updateFieldSplit(this->bzy, -1); computeTmpField(&FP3::x, fs->complexGrid->Ez, fs->grid->dt * 0.5); updateFieldSplit(this->bxy, +1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateBzSplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::x, fs->complexGrid->Ey, fs->grid->dt * 0.5); updateFieldSplit(this->bxz, -1); computeTmpField(&FP3::y, fs->complexGrid->Ex, fs->grid->dt * 0.5); updateFieldSplit(this->byz, +1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateExSplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::y, fs->complexGrid->Bz, fs->grid->dt); updateFieldSplit(this->eyx, +1); computeTmpField(&FP3::z, fs->complexGrid->By, fs->grid->dt); updateFieldSplit(this->ezx, -1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateEySplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::z, fs->complexGrid->Bx, fs->grid->dt); updateFieldSplit(this->ezy, +1); computeTmpField(&FP3::x, fs->complexGrid->Bz, fs->grid->dt); updateFieldSplit(this->exy, -1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateEzSplit() { SpectralFieldSolver<gridTypes>* fs = getFieldSolver(); computeTmpField(&FP3::x, fs->complexGrid->By, fs->grid->dt); updateFieldSplit(this->exz, +1); computeTmpField(&FP3::y, fs->complexGrid->Bx, fs->grid->dt); updateFieldSplit(this->eyz, -1); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateBSplit() { updateBxSplit(); updateBySplit(); updateBzSplit(); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::updateESplit() { updateExSplit(); updateEySplit(); updateEzSplit(); } template<GridTypes gridTypes> inline void PmlSpectralTimeStraggered<gridTypes>::doFirstStep() { updateBSplit(); updateESplit(); 
updateBSplit(); } }
core_ztslqt.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> #undef REAL #define COMPLEX /***************************************************************************//** * * @ingroup core_tslqt * * Computes an LQ factorization of a rectangular matrix * formed by coupling side-by-side a complex m-by-m * lower triangular tile A1 and a complex m-by-n tile A2: * * | A1 A2 | = L * Q * * The tile Q is represented as a product of elementary reflectors * * Q = H(k)^H . . . H(2)^H H(1)^H, where k = min(m,n). * * Each H(i) has the form * * H(i) = I - tau * v * v^H * * where tau is a complex scalar, and v is a complex vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:n)^H is stored on exit in * A2(i,1:n), and tau in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A1 and A2. m >= 0. * The number of columns of the tile A1. * * @param[in] n * The number of columns of the tile A2. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A1 * On entry, the m-by-m tile A1. * On exit, the elements on and below the diagonal of the array * contain the m-by-m lower trapezoidal tile L; * the elements above the diagonal are not referenced. * * @param[in] lda1 * The leading dimension of the array A1. lda1 >= max(1,m). * * @param[in,out] A2 * On entry, the m-by-n tile A2. * On exit, all the elements with the array tau, represent * the unitary tile Q as a product of elementary reflectors * (see Further Details). * * @param[in] lda2 * The leading dimension of the tile A2. lda2 >= max(1,m). * * @param[out] T * The ib-by-m triangular factor T of the block reflector. 
 *          T is upper triangular by block (economic storage);
 *          The rest of the array is not referenced.
 *
 * @param[in] ldt
 *          The leading dimension of the array T. ldt >= ib.
 *
 * @param tau
 *          Auxiliary workspace array of length m.
 *
 * @param work
 *          Auxiliary workspace array of length ib*m.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_ztslqt(int m, int n, int ib,
                       plasma_complex64_t *A1, int lda1,
                       plasma_complex64_t *A2, int lda2,
                       plasma_complex64_t *T, int ldt,
                       plasma_complex64_t *tau,
                       plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        plasma_coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    // BLAS scalar constants for zgemv calls below.
    static plasma_complex64_t zone  = 1.0;
    static plasma_complex64_t zzero = 0.0;

    // Blocked loop over panels of (at most) ib rows.
    for (int ii = 0; ii < m; ii += ib) {
        int sb = imin(m-ii, ib);
        // Factor one panel: one Householder reflector per row of the panel.
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H(ii*ib+i) to annihilate
            // A(ii*ib+i, ii*ib+i:n).
#ifdef COMPLEX
            // Conjugate the row first: zlarfg builds reflectors for column
            // vectors, while we are eliminating along a row.
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            LAPACKE_zlarfg_work(n+1, &A1[lda1*(ii+i)+ii+i], &A2[ii+i], lda2,
                                &tau[ii+i]);

            plasma_complex64_t alpha = -(tau[ii+i]);
            if (ii+i+1 < m) {
                // Apply H(ii+i-1) to A(ii+i:ii+ib-1, ii+i-1:n) from the right.
                cblas_zcopy(sb-i-1,
                            &A1[lda1*(ii+i)+(ii+i+1)], 1,
                            work, 1);

                cblas_zgemv(CblasColMajor,
                            (CBLAS_TRANSPOSE)PlasmaNoTrans,
                            sb-i-1, n,
                            CBLAS_SADDR(zone), &A2[ii+i+1], lda2,
                                               &A2[ii+i], lda2,
                            CBLAS_SADDR(zone), work, 1);

                cblas_zaxpy(sb-i-1, CBLAS_SADDR(alpha),
                            work, 1,
                            &A1[lda1*(ii+i)+ii+i+1], 1);

                cblas_zgerc(CblasColMajor,
                            sb-i-1, n,
                            CBLAS_SADDR(alpha), work, 1,
                                                &A2[ii+i], lda2,
                            &A2[ii+i+1], lda2);
            }
            // Calculate T: accumulate the i-th column of the triangular factor.
            cblas_zgemv(CblasColMajor,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        i, n,
                        CBLAS_SADDR(alpha), &A2[ii], lda2,
                                            &A2[ii+i], lda2,
                        CBLAS_SADDR(zzero), &T[ldt*(ii+i)], 1);
#ifdef COMPLEX
            // Undo the conjugation applied before zlarfg.
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            cblas_ztrmv(CblasColMajor,
                        (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i,
                        &T[ldt*ii], ldt,
                        &T[ldt*(ii+i)], 1);

            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the panel's block reflector to the trailing rows of [A1 A2].
        if (m > ii+sb) {
            plasma_core_ztsmlq(PlasmaRight, Plasma_ConjTrans,
                               m-(ii+sb), sb, m-(ii+sb), n, ib, ib,
                               &A1[lda1*ii+ii+sb], lda1,
                               &A2[ii+sb], lda2,
                               &A2[ii], lda2,
                               &T[ldt*ii], ldt,
                               work, lda1);
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_ztslqt() as a task with data
// dependences on A1, A2, and T, drawing tau/work from per-thread workspace.
void plasma_core_omp_ztslqt(int m, int n, int ib,
                            plasma_complex64_t *A1, int lda1,
                            plasma_complex64_t *A2, int lda2,
                            plasma_complex64_t *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m]) // T should be mxib, but is stored
                                           // as ibxm
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = plasma_core_ztslqt(m, n, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          T, ldt,
                                          tau,
                                          tau+m);

            if (info != PlasmaSuccess) {
                plasma_error("core_ztslqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
cg.c
// C extension for the cg_solve module. Implements a C routine of the
// conjugate gradient algorithm to solve Ax=b, using sparse matrix A in
// the csr format.
//
// See the module cg_solve.py
//
// Padarn Wilson 2012
//
// Note Padarn 26/11/12: Currently the input matrix for the CG solve must
// be a Sparse_CSR matrix python object - defined in anuga/utilities/sparse.py
//
// Note Padarn 5/12/12: I have tried a few optimization modifications which
// didn't seem to save any time:
// -- conversion of the long arrays to int arrays (to save memory passing time)
// -- taking advantage of the symmetric quality of the matrix A to reduce the zAx loop
// -- specifying different 'chunk' sizes for the openmp loops
// -- using blas instead of own openmp loops

#include "math.h"
#include "stdio.h"
// FIX: stdlib.h was missing, so malloc/free were implicitly declared
// (undefined behavior on LP64 platforms where int != pointer size).
#include "stdlib.h"

#if defined(__APPLE__)
// clang doesn't have openmp
#else
#include "omp.h"
#endif

// Dot product of two double vectors: a.b
// @input N: int length of vectors a and b
//        a: first vector of doubles
//        b: second vector of doubles
// @return: double result of a.b
double cg_ddot(int N, double *a, double *b)
{
  double ret = 0;
  int i;
  #pragma omp parallel for private(i) reduction(+:ret)
  for (i = 0; i < N; i++) {
    ret += a[i] * b[i];
  }
  return ret;
}

// In place multiplication of a double vector x by constant a: x = a*x
// @input N: int length of vector x
//        a: double scalar to multiply by
//        x: double vector to scale
void cg_dscal(int N, double a, double *x)
{
  int i;
  #pragma omp parallel for private(i)
  for (i = 0; i < N; i++) {
    x[i] = a * x[i];
  }
}

// Copy of one vector to another - memory already allocated: y = x
// @input N: int length of vectors x and y
//        x: double vector to make copy of
//        y: double vector to copy into
void cg_dcopy(int N, double *x, double *y)
{
  int i;
  #pragma omp parallel for private(i)
  for (i = 0; i < N; i++) {
    y[i] = x[i];
  }
}

// In place axpy operation: y = a*x + y
// @input N: int length of vectors x and y
//        a: double to multiply x by
//        x: first double vector
//        y: second double vector, stores result
void cg_daxpy(int N, double a, double *x, double *y)
{
  int i;
  #pragma omp parallel for private(i)
  for (i = 0; i < N; i++) {
    y[i] = y[i] + a * x[i];
  }
}

// Sparse CSR matrix-vector product: z = A*x
// @input z: double vector to store the result
//        data: double vector with non-zero entries of A
//        colind: long vector of column indices of non-zero entries of A
//        row_ptr: long vector giving index of rows for non-zero entries of A
//        x: double vector to be multiplied
//        M: number of rows of A (length of z)
void cg_zAx(double *z, double *data, long *colind, long *row_ptr,
            double *x, int M)
{
  long i, ckey;
  #pragma omp parallel for private(ckey, i)
  for (i = 0; i < M; i++) {
    // Accumulate the row sum locally to avoid repeated stores to z[i].
    double sum = 0.0;
    for (ckey = row_ptr[i]; ckey < row_ptr[i+1]; ckey++) {
      sum += data[ckey] * x[colind[ckey]];
    }
    z[i] = sum;
  }
}

// Diagonal matrix-vector product: z = D*x
// @input z: double vector to store the result
//        D: double vector of diagonal matrix
//        x: double vector to be multiplied
//        M: length of vector x
void cg_zDx(double *z, double *D, double *x, int M)
{
  long i;
  #pragma omp parallel for private(i)
  for (i = 0; i < M; i++) {
    z[i] = D[i] * x[i];
  }
}

// Inverse-diagonal matrix-vector product: z = D^-1 * x
// @input z: double vector to store the result
//        D: double vector of diagonal matrix (entries must be non-zero)
//        x: double vector to be multiplied
//        M: length of vector x
void cg_zDinx(double *z, double *D, double *x, int M)
{
  long i;
  #pragma omp parallel for private(i)
  for (i = 0; i < M; i++) {
    z[i] = 1.0 / D[i] * x[i];
  }
}

// Sparse CSR matrix-vector product and vector addition: z = a*A*x + y
// @input z: double vector to store the result
//        a: double to scale matrix-vector product by
//        data: double vector with non-zero entries of A
//        colind: long vector of column indices of non-zero entries of A
//        row_ptr: long vector giving index of rows for non-zero entries of A
//        x: double vector to be multiplied
//        y: double vector to add
//        M: number of rows of A (length of z)
void cg_zaAxpy(double *z, double a, double *data, long *colind, long *row_ptr,
               double *x, double *y, int M)
{
  long i, ckey;
  #pragma omp parallel for private(ckey, i)
  for (i = 0; i < M; i++) {
    double sum = y[i];
    for (ckey = row_ptr[i]; ckey < row_ptr[i+1]; ckey++) {
      sum += a * data[ckey] * x[colind[ckey]];
    }
    z[i] = sum;
  }
}

// Jacobi preconditioner for matrix A: extracts the diagonal of A into
// precon. If a diagonal element is zero, 1 is stored instead (should not
// occur for a valid SPD system).
// @input data: double vector with non-zero entries of A
//        colind: long vector of column indices of non-zero entries of A
//        row_ptr: long vector giving index of rows for non-zero entries of A
//        precon: double vector of length M to store the diagonal
//        M: number of rows of A
// @return: 0 on success
int _jacobi_precon_c(double *data, long *colind, long *row_ptr,
                     double *precon, int M)
{
  long i, ckey;
  double diag;
  #pragma omp parallel for private(diag, ckey, i)
  for (i = 0; i < M; i++) {
    diag = 0;
    for (ckey = row_ptr[i]; ckey < row_ptr[i+1]; ckey++) {
      if (i == colind[ckey]) {
        diag = data[ckey];
      }
    }
    if (diag == 0) {
      diag = 1;
    }
    precon[i] = diag;
  }
  return 0;
}

// Conjugate gradient solve Ax = b for x, A given in Sparse CSR format.
// @input data: double vector with non-zero entries of A
//        colind: long vector of column indices of non-zero entries of A
//        row_ptr: long vector giving index of rows for non-zero entries of A
//        b: double vector specifying right hand side of equation to solve
//        x: double vector with initial guess and to store result
//        imax: maximum number of iterations
//        tol: relative error tolerance for stopping criteria
//        a_tol: absolute error tolerance for stopping criteria
//        M: length of vectors x and b
// @return: 0 on success, -1 on failure (no convergence or out of memory)
int _cg_solve_c(double *data, long *colind, long *row_ptr, double *b,
                double *x, int imax, double tol, double a_tol, int M)
{
  int i = 1;
  double alpha, rTr, rTrOld, bt, rTr0;

  double *d = malloc(sizeof(double) * M);
  double *r = malloc(sizeof(double) * M);
  double *q = malloc(sizeof(double) * M);
  // FIX: allocation results were previously unchecked.
  if (d == NULL || r == NULL || q == NULL) {
    free(d);
    free(r);
    free(q);
    return -1;
  }

  // r = b - A*x; d = r (initial search direction).
  cg_zaAxpy(r, -1.0, data, colind, row_ptr, x, b, M);
  cg_dcopy(M, r, d);

  rTr = cg_ddot(M, r, r);
  rTr0 = rTr;

  // tol*tol replaces pow(tol, 2): same value, no libm call per iteration.
  while ((i < imax) && (rTr > tol * tol * rTr0) && (rTr > a_tol * a_tol)) {
    cg_zAx(q, data, colind, row_ptr, d, M);
    alpha = rTr / cg_ddot(M, d, q);
    cg_daxpy(M, alpha, d, x);
    cg_daxpy(M, -alpha, q, r);
    rTrOld = rTr;
    rTr = cg_ddot(M, r, r);
    bt = rTr / rTrOld;
    cg_dscal(M, bt, d);
    cg_daxpy(M, 1.0, r, d);
    i = i + 1;
  }

  free(d);
  free(r);
  free(q);

  if (i >= imax) {
    return -1;
  }
  return 0;
}

// Preconditioned conjugate gradient solve Ax = b for x, A given in Sparse
// CSR format, using a diagonal (Jacobi) preconditioner.
// @input data: double vector with non-zero entries of A
//        colind: long vector of column indices of non-zero entries of A
//        row_ptr: long vector giving index of rows for non-zero entries of A
//        b: double vector specifying right hand side of equation to solve
//        x: double vector with initial guess and to store result
//        imax: maximum number of iterations
//        tol: relative error tolerance for stopping criteria
//        a_tol: absolute error tolerance for stopping criteria
//        M: length of vectors x and b
//        precon: diagonal preconditioner given as vector
// @return: 0 on success, -1 on failure (no convergence or out of memory)
int _cg_solve_c_precon(double *data, long *colind, long *row_ptr, double *b,
                       double *x, int imax, double tol, double a_tol, int M,
                       double *precon)
{
  int i = 1;
  double alpha, rTr, rTrOld, bt, rTr0;

  double *d = malloc(sizeof(double) * M);
  double *r = malloc(sizeof(double) * M);
  double *q = malloc(sizeof(double) * M);
  double *rhat = malloc(sizeof(double) * M);
  // FIX: allocation results were previously unchecked; also removed the
  // xold/temp buffers that were allocated but never read.
  if (d == NULL || r == NULL || q == NULL || rhat == NULL) {
    free(d);
    free(r);
    free(q);
    free(rhat);
    return -1;
  }

  // r = b - A*x; rhat = M^-1 r; d = rhat (initial search direction).
  cg_zaAxpy(r, -1.0, data, colind, row_ptr, x, b, M);
  cg_zDinx(rhat, precon, r, M);
  cg_dcopy(M, rhat, d);

  rTr = cg_ddot(M, r, rhat);
  rTr0 = rTr;

  while ((i < imax) && (rTr > tol * tol * rTr0) && (rTr > a_tol * a_tol)) {
    cg_zAx(q, data, colind, row_ptr, d, M);
    alpha = rTr / cg_ddot(M, d, q);
    cg_daxpy(M, alpha, d, x);
    cg_daxpy(M, -alpha, q, r);
    cg_zDinx(rhat, precon, r, M);
    rTrOld = rTr;
    rTr = cg_ddot(M, r, rhat);
    bt = rTr / rTrOld;
    cg_dscal(M, bt, d);
    cg_daxpy(M, 1.0, rhat, d);
    i = i + 1;
  }

  free(rhat);
  free(d);
  free(r);
  free(q);

  if (i >= imax) {
    return -1;
  }
  return 0;
}
bfe-bf.c
#include "include/bfe-bf.h"
#include "bloom.h"
#include "core.h"
#include "utils.h"

#include "FIPS202-opt64/KeccakHash.h"

#include <config.h>
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Serialized sizes of relic group/field elements (compressed point encodings). */
#define EP_SIZE (1 + 2 * RLC_FP_BYTES)
#define EP2_SIZE (1 + 4 * RLC_FP_BYTES)
#define FP12_SIZE (12 * RLC_FP_BYTES)

/* bitset implementation */
#define BITSET_WORD_BITS (8 * sizeof(uint64_t))
#define BITSET_SIZE(size) (((size) + BITSET_WORD_BITS - 1) / BITSET_WORD_BITS)

/**
 * Creates a bitset with the given number of bits.
 *
 * @param size the number of bits.
 * @return The initialized bitset with all bits set to 0.
 */
static inline bitset_t bitset_init(unsigned int size) {
  return (bitset_t){.bits = calloc(BITSET_SIZE(size), sizeof(uint64_t)), .size = size};
}

/**
 * Sets a specific bit of a bitset.
 *
 * @param bitset the bitset.
 * @param index  the index of the bit supposed to be set to 1.
 */
static inline void bitset_set(bitset_t* bitset, unsigned int index) {
  bitset->bits[index / BITSET_WORD_BITS] |= (UINT64_C(1) << (index & (BITSET_WORD_BITS - 1)));
}

/**
 * Retrieves a specific bit of a bitset.
 *
 * @param bitset the bitset.
 * @param index  the index of the bit in question.
 * @return non-0 if the bit is set, 0 otherwise
 */
static inline uint64_t bitset_get(const bitset_t* bitset, unsigned int index) {
  return bitset->bits[index / BITSET_WORD_BITS] & (UINT64_C(1) << (index & (BITSET_WORD_BITS - 1)));
}

/**
 * Computes the number of set bits of a bitset.
 *
 * @param bitset the bitset.
 * @return number of set bits
 */
static inline unsigned int bitset_popcount(const bitset_t* bitset) {
  unsigned int bits = 0;
  for (unsigned int idx = 0; idx != BITSET_SIZE(bitset->size); ++idx) {
    /* FIX: __builtin_popcount takes an unsigned int and silently truncated
     * the 64-bit words, dropping the upper 32 bits of every word; use the
     * unsigned long long variant. */
    bits += __builtin_popcountll(bitset->bits[idx]);
  }
  return bits;
}

/**
 * Frees the memory allocated by the bitset.
 *
 * @param bitset the bitset.
 */
static inline void bitset_clean(bitset_t* bitset) {
  if (bitset) {
    free(bitset->bits);
    bitset->bits = NULL;
    bitset->size = 0;
  }
}

/* bloom filter implementation */

/* Optimal bit count m = -n * ln(p) / (ln 2)^2 for n insertions and
 * false-positive probability p. */
static unsigned int get_needed_size(unsigned int n, double false_positive_prob) {
  return -floor((n * log(false_positive_prob)) / (log(2) * log(2)));
}

/* Initializes a bloom filter with a fixed bit count and hash count (used when
 * deserializing a stored filter). */
static bloomfilter_t bf_init_fixed(unsigned int size, unsigned int hash_count) {
  return (bloomfilter_t){.hash_count = hash_count, .bitset = bitset_init(size)};
}

/* Initializes a bloom filter sized for n insertions with the given
 * false-positive probability; hash count k = (m/n) * ln 2. */
static bloomfilter_t bf_init(unsigned int n, double false_positive_prob) {
  const unsigned int bitset_size = get_needed_size(n, false_positive_prob);
  return (bloomfilter_t){.hash_count = ceil((bitset_size / (double)n) * log(2)),
                         .bitset     = bitset_init(bitset_size)};
}

/* Derives the bit position for hash function hash_idx applied to input, via
 * domain-separated SHAKE128. */
static unsigned int get_position(uint32_t hash_idx, const uint8_t* input, size_t input_len,
                                 unsigned int filter_size) {
  static const uint8_t domain[] = "BF_HASH";

  Keccak_HashInstance shake;
  Keccak_HashInitialize_SHAKE128(&shake);
  Keccak_HashUpdate(&shake, domain, sizeof(domain) * 8);
  hash_idx = htole32(hash_idx);
  Keccak_HashUpdate(&shake, (const uint8_t*)&hash_idx, sizeof(hash_idx) * 8);
  Keccak_HashUpdate(&shake, input, input_len * 8);
  Keccak_HashFinal(&shake, NULL);

  uint64_t output = 0;
  Keccak_HashSqueeze(&shake, (uint8_t*)&output, sizeof(output) * 8);
  return le64toh(output) % filter_size;
}

/* Computes all hash_count bloom-filter bit positions for the serialized group
 * element input. positions must hold at least hash_count entries. */
static void bf_get_bit_positions(unsigned int* positions, const ep_t input, unsigned int hash_count,
                                 unsigned int filter_size) {
  const unsigned int buffer_size = ep_size_bin(input, 0);
  uint8_t buffer[EP_SIZE]        = {0};
  ep_write_bin(buffer, buffer_size, input, 0);

  for (unsigned int i = 0; i < hash_count; ++i) {
    positions[i] = get_position(i, buffer, buffer_size, filter_size);
  }
}

/* Releases the bloom filter's bitset. */
static void bf_clear(bloomfilter_t* filter) {
  if (filter) {
    bitset_clean(&filter->bitset);
  }
}

/* Boneh-Franklin IBE implementation */

/* Samples a master secret key and computes the matching public key g1^sk. */
static int ibe_keygen(bn_t secret_key, bfe_bf_public_key_t* public_key) {
  int status = BFE_SUCCESS;
  TRY {
    zp_rand(secret_key);
    ep_mul_gen(public_key->public_key, secret_key);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  return status;
}

/* Extracts the identity secret key H(id)^sk in G2. */
static int ibe_extract(ep2_t extracted_key, const bn_t secret_key, const uint8_t* id,
                       size_t id_size) {
  int status = BFE_SUCCESS;
  ep2_t qid;
  ep2_null(qid);
  TRY {
    ep2_new(qid);
    ep2_map(qid, id, id_size);
    ep2_mul(extracted_key, qid, secret_key);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    ep2_free(qid);
  }
  return status;
}

/* G(y) ^ K: XORs input with a SHAKE256-derived stream bound to y and len. */
static void hash_and_xor(uint8_t* dst, size_t len, const uint8_t* input, fp12_t y) {
  static const uint8_t domain_G[] = "BFE_H_G";

  uint8_t buffer[FP12_SIZE] = {0};
  fp12_write_bin(buffer, FP12_SIZE, y, 0);

  Keccak_HashInstance shake;
  Keccak_HashInitialize_SHAKE256(&shake);
  Keccak_HashUpdate(&shake, domain_G, sizeof(domain_G) * 8);
  Keccak_HashUpdate(&shake, buffer, FP12_SIZE * 8);
  const uint64_t len_le = htole64(len);
  Keccak_HashUpdate(&shake, (const uint8_t*)&len_le, sizeof(len_le) * 8);
  Keccak_HashFinal(&shake, NULL);

  /* Squeeze and XOR in 64-byte chunks. */
  for (; len; len -= MIN(len, 64), dst += 64, input += 64) {
    uint8_t buf[64];
    const size_t l = MIN(len, 64);
    Keccak_HashSqueeze(&shake, buf, l * 8);

    /* make use of SIMD instructions */
#pragma omp simd
    for (size_t i = 0; i < l; ++i) {
      dst[i] = input[i] ^ buf[i];
    }
  }
}

/* R(K): domain-separated SHAKE256 instance keyed with K, later squeezed for
 * the encapsulation randomness r and the session key K'. */
static void hash_R(Keccak_HashInstance* ctx, const uint8_t* key, size_t key_size) {
  static const uint8_t domain_R[] = "BFE_H_R";

  Keccak_HashInitialize_SHAKE256(ctx);
  Keccak_HashUpdate(ctx, domain_R, sizeof(domain_R) * 8);
  Keccak_HashUpdate(ctx, key, key_size * 8);
  Keccak_HashFinal(ctx, NULL);
}

/* IBE encryption of message under identity id: dst = message ^ G(e(pk^r, H(id))). */
static int ibe_encrypt(uint8_t* dst, ep_t pkr, const uint8_t* id, size_t id_len,
                       const uint8_t* message, size_t message_len) {
  int status = BFE_SUCCESS;
  ep2_t qid;
  fp12_t t;
  ep2_null(qid);
  fp12_null(t);
  TRY {
    ep2_new(qid);
    fp12_new(t);

    /* G(i_j) */
    ep2_map(qid, id, id_len);
    /* e(pk^r, G(i_j)) */
    pp_map_k12(t, pkr, qid);
    hash_and_xor(dst, message_len, message, t);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    fp12_free(t);
    ep2_free(qid);
  };
  return status;
}

/* IBE decryption: message = Kxored ^ G(e(g1^r, sk_id)). */
static int ibe_decrypt(uint8_t* message, ep_t g1r, const uint8_t* Kxored, size_t length,
                       ep2_t secret_key) {
  int status = BFE_SUCCESS;
  fp12_t t;
  fp12_null(t);
  TRY {
    fp12_new(t);
    pp_map_k12(t, g1r, secret_key);
    hash_and_xor(message, length, Kxored, t);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    fp12_free(t);
  };
  return status;
}

/* BFE implementation */

int bfe_bf_init_secret_key(bfe_bf_secret_key_t* secret_key) {
  memset(secret_key, 0, sizeof(bfe_bf_secret_key_t));
  return BFE_SUCCESS;
}

void bfe_bf_clear_secret_key(bfe_bf_secret_key_t* secret_key) {
  if (secret_key) {
    if (secret_key->secret_keys) {
      /* Only unpunctured slots (bit not set) still hold live keys; punctured
       * slots were already freed in bfe_bf_puncture. */
      for (unsigned int i = 0; i < secret_key->secret_keys_len; ++i) {
        if (bitset_get(&secret_key->filter.bitset, i) == 0) {
          ep2_set_infty(secret_key->secret_keys[i]);
          ep2_free(secret_key->secret_keys[i]);
        }
      }
      free(secret_key->secret_keys);
      secret_key->secret_keys_len = 0;
      secret_key->secret_keys     = NULL;
    }
    bf_clear(&secret_key->filter);
  }
}

int bfe_bf_init_public_key(bfe_bf_public_key_t* public_key) {
  public_key->filter_hash_count = public_key->filter_size = public_key->key_size = 0;

  int status = BFE_SUCCESS;
  ep_null(public_key->public_key);
  TRY {
    ep_new(public_key->public_key);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  return status;
}

void bfe_bf_clear_public_key(bfe_bf_public_key_t* public_key) {
  if (public_key) {
    public_key->filter_hash_count = public_key->filter_size = public_key->key_size = 0;
    ep_free(public_key->public_key);
    ep_null(public_key->public_key);
  }
}

/* Generates a BFE key pair: IBE master key pair plus one extracted identity
 * key per bloom-filter bit. */
int bfe_bf_keygen(bfe_bf_public_key_t* public_key, bfe_bf_secret_key_t* secret_key,
                  unsigned int key_size, unsigned int filter_size, double false_positive_prob) {
  /* FIX: the original guard also tested `order_size > MAX_ORDER_SIZE`, but no
   * `order_size` exists in this translation unit (it does not compile; likely
   * an extraction artifact). Only key_size can be validated here. */
  if (key_size > MAX_BFE_KEY_SIZE) {
    return BFE_ERROR_INVALID_PARAM;
  }

  int status          = BFE_SUCCESS;
  bloomfilter_t filter = bf_init(filter_size, false_positive_prob);
  if (filter.hash_count > MAX_BLOOMFILTER_HASH_COUNT) {
    bf_clear(&filter);
    return BFE_ERROR_INVALID_PARAM;
  }

  const unsigned int bf_size = filter.bitset.size;
  secret_key->secret_keys    = calloc(bf_size, sizeof(ep2_t));
  if (!secret_key->secret_keys) {
    bf_clear(&filter);
    return BFE_ERROR;
  }

  public_key->key_size          = key_size;
  public_key->filter_size       = bf_size;
  public_key->filter_hash_count = filter.hash_count;

  secret_key->secret_keys_len = bf_size;
  secret_key->filter          = filter;

  bn_t sk;
  bn_null(sk);
  TRY {
    bn_new(sk);
    /* generate IBE key */
    status = ibe_keygen(sk, public_key);
    if (!status) {
      /* run key generation in parallel */
#pragma omp parallel for reduction(| : status)
      for (unsigned int i = 0; i < bf_size; ++i) {
        /* extract key for identity i */
        const uint64_t id = htole64(i);
        status |= ibe_extract(secret_key->secret_keys[i], sk, (const uint8_t*)&id, sizeof(id));
      }
    }
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    bn_free(sk);
  }

  return status;
}

/* Encrypts key K to every bloom-filter position derived from u = g1^r. */
static int internal_encrypt(bfe_bf_ciphertext_t* ciphertext,
                            const bfe_bf_public_key_t* public_key, bn_t r, const uint8_t* K) {
  int status = BFE_SUCCESS;
  unsigned int bit_positions[MAX_BLOOMFILTER_HASH_COUNT];

  ep_t pkr;
  ep_null(pkr);
  TRY {
    ep_new(pkr);

    /* g_1^r */
    ep_mul_gen(ciphertext->u, r);
    /* pk^r */
    ep_mul(pkr, public_key->public_key, r);

    bf_get_bit_positions(bit_positions, ciphertext->u, public_key->filter_hash_count,
                         public_key->filter_size);

#pragma omp parallel for reduction(| : status)
    for (unsigned int i = 0; i < public_key->filter_hash_count; ++i) {
      const uint64_t id = htole64(bit_positions[i]);
      status |= ibe_encrypt(&ciphertext->v[i * public_key->key_size], pkr, (const uint8_t*)&id,
                            sizeof(id), K, public_key->key_size);
    }
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    ep_free(pkr);
  }

  return status;
}

/* Encapsulation: K is random, (r, K') = R(K), ciphertext encrypts K under
 * randomness r, and Kout = K'. */
int bfe_bf_encaps(bfe_bf_ciphertext_t* ciphertext, uint8_t* Kout,
                  const bfe_bf_public_key_t* public_key) {
  uint8_t key_buffer[MAX_BFE_KEY_SIZE];
  rand_bytes(key_buffer, public_key->key_size);

  int status = BFE_SUCCESS;
  bn_t r;
  bn_null(r);
  TRY {
    bn_new(r);

    Keccak_HashInstance shake;
    hash_R(&shake, key_buffer, public_key->key_size);
    /* r of (r, K') = R(K) */
    hash_squeeze_zp(r, &shake);

    status = internal_encrypt(ciphertext, public_key, r, key_buffer);
    if (!status) {
      /* K' of (r, K') = R(K) */
      Keccak_HashSqueeze(&shake, Kout, public_key->key_size * 8);
    }
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    bn_free(r);
  }

#if defined(HAVE_EXPLICIT_BZERO)
  explicit_bzero(key_buffer, sizeof(key_buffer));
#endif
  return status;
}

/* Punctures the secret key on a ciphertext: marks all its bloom-filter
 * positions and destroys the corresponding identity keys. */
void bfe_bf_puncture(bfe_bf_secret_key_t* secret_key, bfe_bf_ciphertext_t* ciphertext) {
  unsigned int indices[MAX_BLOOMFILTER_HASH_COUNT];
  // bf_add(&secret_key->filter, ciphertext->u);
  bf_get_bit_positions(indices, ciphertext->u, secret_key->filter.hash_count,
                       secret_key->filter.bitset.size);
  for (unsigned int i = 0; i < secret_key->filter.hash_count; ++i) {
    bitset_set(&secret_key->filter.bitset, indices[i]);
    ep2_set_infty(secret_key->secret_keys[indices[i]]);
    ep2_free(secret_key->secret_keys[indices[i]]);
  }
}

/* Returns 0 iff the two ciphertexts are identical. */
static int bfe_bf_ciphertext_cmp(const bfe_bf_ciphertext_t* ciphertext1,
                                 const bfe_bf_ciphertext_t* ciphertext2) {
  if (ep_cmp(ciphertext1->u, ciphertext2->u) != RLC_EQ ||
      ciphertext1->v_size != ciphertext2->v_size) {
    return 1;
  }

  return memcmp(ciphertext1->v, ciphertext2->v, ciphertext1->v_size);
}

/* Decapsulation: recovers K from any unpunctured position, then re-encrypts
 * with R(K) and compares to reject malformed ciphertexts. */
int bfe_bf_decaps(uint8_t* key, const bfe_bf_public_key_t* public_key,
                  const bfe_bf_secret_key_t* secret_key, bfe_bf_ciphertext_t* ciphertext) {
  int status = BFE_SUCCESS;
  uint8_t key_buffer[MAX_BFE_KEY_SIZE];

  unsigned int indices[MAX_BLOOMFILTER_HASH_COUNT];
  bf_get_bit_positions(indices, ciphertext->u, secret_key->filter.hash_count,
                       secret_key->filter.bitset.size);

  status = BFE_ERROR;
  for (unsigned int i = 0; i < secret_key->filter.hash_count; ++i) {
    if (bitset_get(&secret_key->filter.bitset, indices[i]) == 0) {
      status = ibe_decrypt(key_buffer, ciphertext->u, &ciphertext->v[i * public_key->key_size],
                           public_key->key_size, secret_key->secret_keys[indices[i]]);
      if (status == BFE_SUCCESS) {
        break;
      }
    }
  }
  if (status != BFE_SUCCESS) {
    return BFE_ERROR_KEY_PUNCTURED;
  }

  bfe_bf_ciphertext_t check_ciphertext;
  bfe_bf_init_ciphertext(&check_ciphertext, public_key);

  bn_t r;
  bn_null(r);
  TRY {
    bn_new(r);

    Keccak_HashInstance shake;
    hash_R(&shake, key_buffer, public_key->key_size);
    /* r of (r, K') = R(K) */
    hash_squeeze_zp(r, &shake);

    status = internal_encrypt(&check_ciphertext, public_key, r, key_buffer);
    if (!status && !bfe_bf_ciphertext_cmp(&check_ciphertext, ciphertext)) {
      /* K' of (r, K') = R(K) */
      Keccak_HashSqueeze(&shake, key, public_key->key_size * 8);
    }
    else {
      status = BFE_ERROR;
    }
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  FINALLY {
    bn_free(r);
    bfe_bf_clear_ciphertext(&check_ciphertext);
  }

#if defined(HAVE_EXPLICIT_BZERO)
  explicit_bzero(key_buffer, sizeof(key_buffer));
#endif
  return status;
}

/* Allocates storage for a ciphertext with hash_count slots of key_length
 * bytes each. */
static int init_ciphertext(bfe_bf_ciphertext_t* ciphertext, unsigned int hash_count,
                           unsigned int key_length) {
  int status = BFE_SUCCESS;
  ep_null(ciphertext->u);
  TRY {
    ep_new(ciphertext->u);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }

  if (!status) {
    ciphertext->v_size = hash_count * key_length;
    ciphertext->v      = calloc(hash_count, key_length);
    if (!ciphertext->v) {
      status = BFE_ERROR;
    }
  }
  return status;
}

int bfe_bf_init_ciphertext(bfe_bf_ciphertext_t* ciphertext,
                           const bfe_bf_public_key_t* public_key) {
  return init_ciphertext(ciphertext, public_key->filter_hash_count, public_key->key_size);
}

void bfe_bf_clear_ciphertext(bfe_bf_ciphertext_t* ciphertext) {
  if (ciphertext) {
    free(ciphertext->v);
    ep_free(ciphertext->u);
    ciphertext->v_size = 0;
    ciphertext->v      = NULL;
  }
}

/* Serialized layout: total size (u32) | u (EP_SIZE) | v (v_size bytes). */
unsigned int bfe_bf_ciphertext_size(const bfe_bf_ciphertext_t* ciphertext) {
  return 1 * sizeof(uint32_t) + EP_SIZE + ciphertext->v_size;
}

void bfe_bf_ciphertext_serialize(uint8_t* dst, const bfe_bf_ciphertext_t* ciphertext) {
  const uint32_t u_size     = EP_SIZE;
  const uint32_t total_size = bfe_bf_ciphertext_size(ciphertext);
  write_u32(&dst, total_size);

  ep_write_bin(dst, EP_SIZE, ciphertext->u, 0);
  memcpy(&dst[u_size], ciphertext->v, ciphertext->v_size);
}

int bfe_bf_ciphertext_deserialize(bfe_bf_ciphertext_t* ciphertext, const uint8_t* src) {
  const uint32_t total_size = read_u32(&src);
  const unsigned int v_size = total_size - EP_SIZE - 1 * sizeof(uint32_t);

  if (init_ciphertext(ciphertext, 1, v_size)) {
    return BFE_ERROR;
  }

  int status = BFE_SUCCESS;
  TRY {
    ep_read_bin(ciphertext->u, src, EP_SIZE);
    ciphertext->v_size = v_size;
    memcpy(ciphertext->v, &src[EP_SIZE], v_size);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  return status;
}

unsigned int bfe_bf_public_key_size(void) {
  return 3 * sizeof(uint32_t) + EP_SIZE;
}

void bfe_bf_public_key_serialize(uint8_t* dst, const bfe_bf_public_key_t* public_key) {
  write_u32(&dst, public_key->filter_hash_count);
  write_u32(&dst, public_key->filter_size);
  write_u32(&dst, public_key->key_size);
  ep_write_bin(dst, EP_SIZE, public_key->public_key, 0);
}

int bfe_bf_public_key_deserialize(bfe_bf_public_key_t* public_key, const uint8_t* src) {
  public_key->filter_hash_count = read_u32(&src);
  public_key->filter_size       = read_u32(&src);
  public_key->key_size          = read_u32(&src);

  int status = BFE_SUCCESS;
  TRY {
    ep_read_bin(public_key->public_key, src, EP_SIZE);
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  return status;
}

/* Only unpunctured identity keys are serialized; punctured slots are encoded
 * solely by the set bits of the bloom filter. */
unsigned int bfe_bf_secret_key_size(const bfe_bf_secret_key_t* secret_key) {
  unsigned int num_keys =
      secret_key->filter.bitset.size - bitset_popcount(&secret_key->filter.bitset);
  return 2 * sizeof(uint32_t) + BITSET_SIZE(secret_key->filter.bitset.size) * sizeof(uint64_t) +
         num_keys * EP2_SIZE;
}

void bfe_bf_secret_key_serialize(uint8_t* dst, const bfe_bf_secret_key_t* secret_key) {
  write_u32(&dst, secret_key->filter.hash_count);
  write_u32(&dst, secret_key->filter.bitset.size);
  for (unsigned int i = 0; i < BITSET_SIZE(secret_key->filter.bitset.size); ++i) {
    write_u64(&dst, secret_key->filter.bitset.bits[i]);
  }

  for (unsigned int i = 0; i < secret_key->filter.bitset.size; ++i) {
    if (bitset_get(&secret_key->filter.bitset, i) == 0) {
      ep2_write_bin(dst, EP2_SIZE, secret_key->secret_keys[i], 0);
      dst += EP2_SIZE;
    }
  }
}

int bfe_bf_secret_key_deserialize(bfe_bf_secret_key_t* secret_key, const uint8_t* src) {
  const unsigned int hash_count  = read_u32(&src);
  const unsigned int filter_size = read_u32(&src);

  secret_key->filter = bf_init_fixed(filter_size, hash_count);
  for (unsigned int i = 0; i < BITSET_SIZE(secret_key->filter.bitset.size); ++i) {
    secret_key->filter.bitset.bits[i] = read_u64(&src);
  }

  secret_key->secret_keys_len = filter_size;
  secret_key->secret_keys     = calloc(filter_size, sizeof(ep2_t));
  /* FIX: the allocation result was previously unchecked. */
  if (!secret_key->secret_keys) {
    bf_clear(&secret_key->filter);
    secret_key->secret_keys_len = 0;
    return BFE_ERROR;
  }

  int status = BFE_SUCCESS;
  TRY {
    for (unsigned int i = 0; i < filter_size; ++i) {
      if (bitset_get(&secret_key->filter.bitset, i) == 0) {
        ep2_new(secret_key->secret_keys[i]);
        ep2_read_bin(secret_key->secret_keys[i], src, EP2_SIZE);
        src += EP2_SIZE;
      }
    }
  }
  CATCH_ANY {
    status = BFE_ERROR;
  }
  return status;
}
local_sum-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
// This is an example demonstrating the usage of mshadow ps
#include <cstdio>
// use openmp to launch multiple threads
#include <omp.h>
#include <mshadow/tensor.h>
#include <mshadow-ps/mshadow_ps.h>

// simple util to print a 2-D CPU tensor, row by row
void Print_(mshadow::Tensor<mshadow::cpu, 2, float> ts) {
  for (mshadow::index_t i = 0; i < ts.size(0); ++i) {
    for (mshadow::index_t j = 0; j < ts.size(1); ++j) {
      printf("%g ", ts[i][j]);
    }
    printf("\n");
  }
}
// prints a tensor from any device by copying it to the CPU first
// (the Copy blocks, so only use this for debugging output)
template<typename xpu>
inline void Print(mshadow::Tensor<xpu, 2, float> ts) {
  mshadow::TensorContainer<mshadow::cpu, 2, float> tmp;
  tmp.Resize(ts.shape_);
  mshadow::Copy(tmp, ts);
  Print_(tmp);
}

// this function is run by a dedicated thread for device devid;
// it pushes two rows to the shared model and pulls the aggregated result back
template<typename xpu>
inline void RunWorkerThread(int devid,
                            mshadow::ps::ISharedModel<xpu, float> *ps) {
  // initialize tensor engine
  mshadow::InitTensorEngine<xpu>(devid);
  mshadow::Stream<xpu> *stream = mshadow::NewStream<xpu>(devid);
  // allocate tensor on xpu
  mshadow::TensorContainer<xpu, 2> data(mshadow::Shape2(2, 3));
  // set the computation stream to the new allocated stream
  // this will make subsequent computation whose target is data
  // to use the stream, stream is needed for async execution in GPU
  data.set_stream(stream);
  // assume these operations set the content of data
  data[0] = 1.0f;
  data[1] = devid + data[0];
  printf("dev%d: before sync, data:\n", devid);
  // use print to show result, do not call
  // print normally since Copy will block
  Print(data);
  printf("====================\n");
  // initialize the key, register the shape on parameter server
  ps->InitKey(data[0].shape_, 0, devid);
  ps->InitKey(data[1].shape_, 1, devid);
  // push data[0] out, for update, or aggregation
  // 0 is the key of the data, devid is the current device id
  ps->Push(data[0], 0, devid);
  // pull request is used to request the data to be copied back
  // once computation is done
  ps->PullReq(data[0], 0, devid);
  // computation can be done here..
  // the pull request is handled asynchronously and can be
  // overlapped with it
  // similar as previous call
  ps->Push(data[1], 1, devid);
  ps->PullReq(data[1], 1, devid);
  // more computation can be done here...
  // the computation will be overlapped
  // PullWait will block until these request finishes
  ps->PullWait(0, devid);
  ps->PullWait(1, devid);
  printf("dev%d: after sync, data:\n", devid);
  // use print to show result, do not call
  // print normally since Copy will block
  Print(data);
  printf("====================\n");
  mshadow::DeleteStream(stream);
  mshadow::ShutdownTensorEngine<xpu>();
}

namespace mshadow {
namespace ps {
// model updater is used when update is happening on server side
// if we only use parameter server for sum aggregation
// this is not needed, but we must declare this function to return NULL
template<>
IModelUpdater<float> *CreateModelUpdater(void) {
  return NULL;
}
}
}

// entry point: parses the device list from argv, creates the shared model,
// and launches one worker thread per device via OpenMP
template<typename xpu>
inline int Run(int argc, char *argv[]) {
  if (argc < 2) {
    printf("Usage: device list\n"\
           "\tfor CPU the device list can be arbitrary\n"\
           "\tfor GPU the device list need to be actual device index\n");
    return 0;
  }
#if MSHADOW_RABIT_PS
  rabit::Init(argc, argv);
#endif
  // list of device ids
  std::vector<int> devs;
  // initialization
  for (int i = 1; i < argc; ++i) {
    // record the device id
    devs.push_back(atoi(argv[i]));
  }
  mshadow::ps::ISharedModel<xpu, float>
      *ps = mshadow::ps::CreateSharedModel<xpu, float>("local");
  // initialize the ps
  ps->Init(devs);
  // use openmp to launch #devs threads
  #pragma omp parallel num_threads(devs.size())
  {
    int tid = omp_get_thread_num();
    RunWorkerThread<xpu>(devs[tid], ps);
  }
  delete ps;
#if MSHADOW_RABIT_PS
  rabit::Finalize();
#endif
  return 0;
}
seeta_aip_image.h
//
// Created by kier on 2020/9/27.
//

#ifndef SEETA_AIP_SEETA_AIP_IMAGE_H
#define SEETA_AIP_SEETA_AIP_IMAGE_H

#include "seeta_aip_struct.h"

namespace seeta {
    namespace aip {
        namespace _ {
            /**
             * Element-wise value cast: dst[i] = DST(src[i]) for i in [0, N).
             * Parallelized with OpenMP when threads > 1 and _OPENMP is defined.
             */
            template<typename SRC, typename DST,
                    typename=typename std::enable_if<std::is_convertible<SRC, DST>::value>::type>
            static inline void cast(int threads, const SRC *src, DST *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        dst[i] = DST(src[i]);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i) {
                        *dst++ = DST(*src++);
                    }
                }
            }

            /**
             * Element-wise cast with a pre-multiplied scale:
             * dst[i] = DST(scale * src[i]).
             */
            template<typename SRC, typename DST,
                    typename=typename std::enable_if<std::is_convertible<SRC, DST>::value>::type>
            static inline void cast(int threads, const SRC *src, DST *dst, int32_t N, SRC scale) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        dst[i] = DST(scale * src[i]);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i) {
                        *dst++ = DST(scale * *src++);
                    }
                }
            }

            /**
             * Cast N values whose runtime type is src_type into DST.
             * Note: data_scale is only applied when the source is floating point
             * (FLOAT32/FLOAT64); BYTE and INT32 sources are converted unscaled.
             * VOID and unknown source types are silently ignored.
             */
            template<typename DST>
            static inline void cast_to(int threads, const void *src, SEETA_AIP_VALUE_TYPE src_type,
                                       DST *dst, uint32_t N, float data_scale = 1) {
                switch (src_type) {
                    default:
                    case SEETA_AIP_VALUE_VOID:
                        break;
                    case SEETA_AIP_VALUE_BYTE:
                        cast<uint8_t, DST>(threads, reinterpret_cast<const uint8_t *>(src), dst, int32_t(N));
                        break;
                    case SEETA_AIP_VALUE_INT32:
                        cast<int32_t, DST>(threads, reinterpret_cast<const int32_t *>(src), dst, int32_t(N));
                        break;
                    case SEETA_AIP_VALUE_FLOAT32:
                        cast<float, DST>(threads, reinterpret_cast<const float *>(src), dst, int32_t(N), data_scale);
                        break;
                    case SEETA_AIP_VALUE_FLOAT64:
                        cast<double, DST>(threads, reinterpret_cast<const double *>(src), dst, int32_t(N), data_scale);
                        break;
                }
            }

            /** Byte width of one element of the given value type (0 for VOID/unknown). */
            static inline size_t value_type_width(SEETA_AIP_VALUE_TYPE type) {
                switch (type) {
                    default:
                    case SEETA_AIP_VALUE_VOID:
                        return 0;
                    case SEETA_AIP_VALUE_BYTE:
                        return 1;
                    case SEETA_AIP_VALUE_FLOAT32:
                    case SEETA_AIP_VALUE_INT32:
                        return 4;
                    case SEETA_AIP_VALUE_FLOAT64:
                        return 8;
                }
            }
        }

        /**
         * Cast N values from src_type to dst_type.
         * When the two types are equal the data is memcpy'd unchanged (no
         * data_scale applied); otherwise conversion goes through _::cast_to,
         * which scales only floating-point sources.
         */
        static inline void cast(int threads,
                                const void *src, SEETA_AIP_VALUE_TYPE src_type,
                                void *dst, SEETA_AIP_VALUE_TYPE dst_type,
                                uint32_t N, float data_scale = 1) {
            if (src_type == dst_type) {
                if (src != dst) {
                    std::memcpy(dst, src, _::value_type_width(src_type) * N);
                }
                return;
            }
            switch (dst_type) {
                default:
                case SEETA_AIP_VALUE_VOID:
                    break;
                case SEETA_AIP_VALUE_BYTE:
                    _::cast_to<uint8_t>(threads, src, src_type, reinterpret_cast<uint8_t *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_INT32:
                    _::cast_to<int32_t>(threads, src, src_type, reinterpret_cast<int32_t *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_FLOAT32:
                    _::cast_to<float>(threads, src, src_type, reinterpret_cast<float *>(dst), N, data_scale);
                    break;
                case SEETA_AIP_VALUE_FLOAT64:
                    _::cast_to<double>(threads, src, src_type, reinterpret_cast<double *>(dst), N, data_scale);
                    break;
            }
        }

        namespace _ {
            /**
             * RAW image format corresponding to a value type after a dtype cast.
             * Throws for VOID and FLOAT64, which have no RAW image format.
             */
            static inline SEETA_AIP_IMAGE_FORMAT casted_format(SEETA_AIP_IMAGE_FORMAT original_format,
                                                               SEETA_AIP_VALUE_TYPE casted_type) {
                (void) (original_format);
                switch (casted_type) {
                    default:
                        throw seeta::aip::Exception("Unknown value type.");
                    case SEETA_AIP_VALUE_VOID:
                        throw seeta::aip::Exception("There is no image format VOID");
                    case SEETA_AIP_VALUE_BYTE:
                        return SEETA_AIP_FORMAT_U8RAW;
                    case SEETA_AIP_VALUE_INT32:
                        return SEETA_AIP_FORMAT_I32RAW;
                    case SEETA_AIP_VALUE_FLOAT32:
                        return SEETA_AIP_FORMAT_F32RAW;
                    case SEETA_AIP_VALUE_FLOAT64:
                        throw seeta::aip::Exception("There is no image format FLOAT64");
                }
            }

            // Channel-order conversion codes. The enum values double as indices
            // into the converter[] table in convert_uimage below, so the order
            // here must stay in sync with that table.
            enum cvt_format {
                NO_CONVERT,
                BGR2RGB,
                BGR2BGRA,
                BGR2RGBA,
                BGRA2BGR,
                BGRA2RGB,
                BGRA2RGBA,
                Y2BGR,
                Y2BGRA,
                BGR2Y,
                BGRA2Y,
                RGB2Y,
                RGBA2Y,
            };

            /**
             * Look up the channel-order conversion between two packed U8 formats.
             * NOTE(review): assumes the packed U8 formats are numbered
             * consecutively from 1001 in the row/column order below — verify
             * against the SEETA_AIP_IMAGE_FORMAT declaration.
             */
            static inline cvt_format serch_u8image_cvt_format(SEETA_AIP_IMAGE_FORMAT src, SEETA_AIP_IMAGE_FORMAT dst) {
                // U8RGB, U8BGR, U8RGBA, U8BGRA, U8Y
                static cvt_format table[5][5] = {
                        {NO_CONVERT, BGR2RGB,    BGR2BGRA,   BGR2RGBA,   RGB2Y},
                        {BGR2RGB,    NO_CONVERT, BGR2RGBA,   BGR2BGRA,   BGR2Y},
                        {BGRA2BGR,   BGRA2RGB,   NO_CONVERT, BGRA2RGBA,  RGBA2Y},
                        {BGRA2RGB,   BGRA2BGR,   BGRA2RGBA,  NO_CONVERT, BGRA2Y},
                        {Y2BGR,      Y2BGR,      Y2BGRA,     Y2BGRA,     NO_CONVERT},
                };
                int i = src - 1001;
                int j = dst - 1001;
                return table[i][j];
            }

            // Swap first and third channel; safe for in-place use (src == dst)
            // because the overwritten byte is saved in tmp first.
            static inline void _bgr2rgb(const uint8_t *src, uint8_t *dst) {
                auto tmp = src[0];
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = tmp;
            }

            static inline void convert_uimage_bgr2rgb(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
                    const auto N3 = N * 3;
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) n = 0; n < N3; n += 3) {
                        _bgr2rgb(src + n, dst + n);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 3) {
                        _bgr2rgb(src, dst);
                    }
                }
            }

            // Copy BGR and append an alpha byte of 0.
            static inline void _bgr2bgra(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
                dst[3] = 0;
            }

            static inline void convert_uimage_bgr2bgra(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2bgra(src + i * 3, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 4) {
                        _bgr2bgra(src, dst);
                    }
                }
            }

            // Reverse channel order and append an alpha byte of 0.
            static inline void _bgr2rgba(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = src[0];
                dst[3] = 0;
            }

            static inline void convert_uimage_bgr2rgba(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2rgba(src + i * 3, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 4) {
                        _bgr2rgba(src, dst);
                    }
                }
            }

            // Drop the alpha byte.
            static inline void _bgra2bgr(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
            }

            static inline void convert_uimage_bgra2bgr(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgra2bgr(src + i * 4, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 3) {
                        _bgra2bgr(src, dst);
                    }
                }
            }

            // Drop the alpha byte and reverse channel order.
            static inline void _bgra2rgb(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = src[0];
            }

            static inline void convert_uimage_bgra2rgb(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgra2rgb(src + i * 4, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 3) {
                        _bgra2rgb(src, dst);
                    }
                }
            }

            // Swap first/third channel, keep alpha; in-place safe via tmp.
            static inline void _bgra2rgba(const uint8_t *src, uint8_t *dst) {
                auto tmp = src[0];
                dst[0] = src[2];
                dst[1] = src[1];
                dst[2] = tmp;
                dst[3] = src[3];
            }

            static inline void convert_uimage_bgra2rgba(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
                    const auto N4 = N * 4;
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) n = 0; n < N4; n += 4) {
                        _bgra2rgba(src + n, dst + n);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 4) {
                        _bgra2rgba(src, dst);
                    }
                }
            }

            // Replicate the gray value into all three channels.
            static inline void _y2bgr(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[0];
                dst[2] = src[0];
            }

            static inline void convert_uimage_y2bgr(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _y2bgr(src + i, dst + i * 3);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 1, dst += 3) {
                        _y2bgr(src, dst);
                    }
                }
            }

            // Replicate the gray value and append an alpha byte of 0.
            static inline void _y2bgra(const uint8_t *src, uint8_t *dst) {
                dst[0] = src[0];
                dst[1] = src[0];
                dst[2] = src[0];
                dst[3] = 0;
            }

            static inline void convert_uimage_y2bgra(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _y2bgra(src + i, dst + i * 4);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 1, dst += 4) {
                        _y2bgra(src, dst);
                    }
                }
            }

            // BT.601-style luma from BGR using fixed-point weights
            // (19595, 38469, 7472) / 65536 for R, G, B respectively.
            static inline void _bgr2y(const uint8_t *src, uint8_t *dst) {
                auto B = src[0];
                auto G = src[1];
                auto R = src[2];
                auto Y = (R * uint32_t(19595) + G * uint32_t(38469) + B * uint32_t(7472)) >> 16;
                dst[0] = Y > 255 ? 255 : Y;
            }

            static inline void convert_uimage_bgr2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2y(src + i * 3, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 1) {
                        _bgr2y(src, dst);
                    }
                }
            }

            // BGRA pixels reuse _bgr2y; the alpha byte is simply skipped by the
            // 4-byte source stride.
            static inline void convert_uimage_bgra2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _bgr2y(src + i * 4, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 1) {
                        _bgr2y(src, dst);
                    }
                }
            }

            // Same luma weights as _bgr2y, with R and B channel positions swapped.
            static inline void _rgb2y(const uint8_t *src, uint8_t *dst) {
                auto B = src[2];
                auto G = src[1];
                auto R = src[0];
                auto Y = (R * uint32_t(19595) + G * uint32_t(38469) + B * uint32_t(7472)) >> 16;
                dst[0] = Y > 255 ? 255 : Y;
            }

            static inline void convert_uimage_rgb2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _rgb2y(src + i * 3, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 3, dst += 1) {
                        _rgb2y(src, dst);
                    }
                }
            }

            static inline void convert_uimage_rgba2y(int threads, const uint8_t *src, uint8_t *dst, int32_t N) {
                if (threads > 1) {
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
                    for (decltype(N) i = 0; i < N; ++i) {
                        _rgb2y(src + i * 4, dst + i);
                    }
                } else {
                    for (decltype(N) i = 0; i < N; ++i, src += 4, dst += 1) {
                        _rgb2y(src, dst);
                    }
                }
            }

            /**
             * Dispatch a U8 channel-order conversion by cvt_format code.
             * The table order must match the cvt_format enum; entry 0
             * (NO_CONVERT) is nullptr and results in a no-op.
             */
            static inline void convert_uimage(int threads, cvt_format cvt_code,
                                              const void *src, void *dst, uint32_t pixel_number) {
                static decltype(convert_uimage_bgr2rgb) *converter[] = {
                        nullptr,
                        convert_uimage_bgr2rgb,
                        convert_uimage_bgr2bgra,
                        convert_uimage_bgr2rgba,
                        convert_uimage_bgra2bgr,
                        convert_uimage_bgra2rgb,
                        convert_uimage_bgra2rgba,
                        convert_uimage_y2bgr,
                        convert_uimage_y2bgra,
                        convert_uimage_bgr2y,
                        convert_uimage_bgra2y,
                        convert_uimage_rgb2y,
                        convert_uimage_rgba2y,
                };
                auto func = converter[cvt_code];
                if (!func) return;
                func(threads, reinterpret_cast<const uint8_t *>(src), reinterpret_cast<uint8_t *>(dst),
                     int32_t(pixel_number));
            }

            /**
             * Convert a U8 image between channel layouts.
             * Formats below 1000 are treated as RAW (no channel semantics):
             * they require matching channel counts and are copied verbatim.
             */
            static inline void convert_u8image(int threads,
                                               const void *src_data, void *dst_data,
                                               uint32_t src_channels, uint32_t dst_channels,
                                               SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                               uint32_t pixel_number) {
                if ((src_format < 1000 || dst_format < 1000) && src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format U8Raw");
                }
                if (src_format < 1000 || dst_format < 1000) {
                    // raw no need to channels swap
                    if (src_data != dst_data) {
                        std::memcpy(dst_data, src_data, pixel_number * src_channels);
                    }
                    return;
                }
                auto cvt_code = _::serch_u8image_cvt_format(src_format, dst_format);
                if (cvt_code != 0) {
                    _::convert_uimage(threads, cvt_code, src_data, dst_data, pixel_number);
                } else {
                    // no swap, so copy input to output
                    if (src_data != dst_data) {
                        std::memcpy(dst_data, src_data, pixel_number * src_channels);
                    }
                }
            }

            /** Float images support only F32RAW-to-F32RAW, i.e. a plain copy. */
            static inline void convert_f32image(int threads,
                                                const void *src_data, void *dst_data,
                                                uint32_t src_channels, uint32_t dst_channels,
                                                SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                                uint32_t pixel_number) {
                if (src_format != SEETA_AIP_FORMAT_F32RAW || dst_format != SEETA_AIP_FORMAT_F32RAW) {
                    throw seeta::aip::Exception("Float image format only support F32Raw for now.");
                }
                if (src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format F32Raw");
                }
                if (src_data != dst_data) {
                    std::memcpy(dst_data, src_data, pixel_number * src_channels * 4);
                }
            }

            /** Integer images support only I32RAW-to-I32RAW, i.e. a plain copy. */
            static inline void convert_i32image(int threads,
                                                const void *src_data, void *dst_data,
                                                uint32_t src_channels, uint32_t dst_channels,
                                                SEETA_AIP_IMAGE_FORMAT src_format, SEETA_AIP_IMAGE_FORMAT dst_format,
                                                uint32_t pixel_number) {
                if (src_format != SEETA_AIP_FORMAT_I32RAW || dst_format != SEETA_AIP_FORMAT_I32RAW) {
                    throw seeta::aip::Exception("Integer image format only support I32Raw for now.");
                }
                if (src_channels != dst_channels) {
                    throw seeta::aip::Exception("Can not convert mismatch channel images with format I32Raw");
                }
                if (src_data != dst_data) {
                    std::memcpy(dst_data, src_data, pixel_number * src_channels * 4);
                }
            }

            /** Row-major linear offset of element (dim1, dim2, dim3, dim4) in a 4-D tensor. */
            static inline uint32_t offset4d(const uint32_t *shape,
                                            uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                return ((dim1 * shape[1] + dim2) * shape[2] + dim3) * shape[3] + dim4;
            }

            /**
             * Permute a 4-D tensor: dst = transpose(src, (dim1, dim2, dim3, dim4)),
             * where shape describes src. dst must not alias src.
             */
            template <typename T>
            static inline void permute4d(
                    T *dst, const T *src, const uint32_t *shape,
                    uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                std::vector<uint32_t> dim(4), redim(4), idx(4);
                dim[0] = dim1; redim[dim[0]] = 0;
                dim[1] = dim2; redim[dim[1]] = 1;
                dim[2] = dim3; redim[dim[2]] = 2;
                dim[3] = dim4; redim[dim[3]] = 3;
                std::vector<int> new_dims(4);
                for (int i = 0; i < 4; ++i) new_dims[i] = shape[dim[i]];
                int cnt = 0;
                // Iterate dst in its own row-major order and gather from src.
                for (idx[0] = 0; idx[0] < shape[dim[0]]; ++idx[0]) {
                    for (idx[1] = 0; idx[1] < shape[dim[1]]; ++idx[1]) {
                        for (idx[2] = 0; idx[2] < shape[dim[2]]; ++idx[2]) {
                            for (idx[3] = 0; idx[3] < shape[dim[3]]; ++idx[3]) {
                                dst[cnt] = src[offset4d(
                                        shape,
                                        idx[redim[0]], idx[redim[1]], idx[redim[2]], idx[redim[3]])];
                                cnt++;
                            }
                        }
                    }
                }
            }

            /** Type-dispatching wrapper over the templated permute4d. */
            static inline void permute4d(
                    SEETA_AIP_VALUE_TYPE type,
                    void *dst, const void *src, const uint32_t *shape,
                    uint32_t dim1, uint32_t dim2, uint32_t dim3, uint32_t dim4) {
                switch (type) {
                    default:
                        throw seeta::aip::Exception("Got unknown value type.");
                    case SEETA_AIP_VALUE_VOID:
                        break;
                    case SEETA_AIP_VALUE_BYTE: {
                        using T = uint8_t;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src),
                                  shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_INT32: {
                        using T = int32_t ;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src),
                                  shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_FLOAT32: {
                        using T = float;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src),
                                  shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                    case SEETA_AIP_VALUE_FLOAT64: {
                        using T = double;
                        permute4d(reinterpret_cast<T*>(dst), reinterpret_cast<const T*>(src),
                                  shape, dim1, dim2, dim3, dim4);
                        break;
                    }
                }
            }
        }

        /** Bytes per element for the value type underlying the given image format. */
        inline uint32_t image_format_element_width(SEETA_AIP_IMAGE_FORMAT format) {
            auto type = seeta::aip::ImageData::GetType(format);
            switch (type) {
                default: return 0;
                case SEETA_AIP_VALUE_VOID: return 0;
                case SEETA_AIP_VALUE_BYTE: return 1;
                case SEETA_AIP_VALUE_FLOAT32: return 4;
                case SEETA_AIP_VALUE_INT32: return 4;
                case SEETA_AIP_VALUE_FLOAT64: return 8;
            }
        }

        /**
         * Convert src into dst, handling (in order): identical formats (copy),
         * CHW-layout sources/destinations (via a permuted temporary), then a
         * dtype cast (step 1) followed by a channel-order swap (step 2).
         * data_scale is applied when casting from a floating-point dtype.
         */
        static inline void convert(int threads, SeetaAIPImageData src, SeetaAIPImageData dst, float data_scale = 255.0) {
            if (src.format == dst.format) {
                auto SRC_N = src.number * src.height * src.width;
                auto DST_N = dst.number * dst.height * dst.width;
                if (SRC_N != DST_N) {
                    throw seeta::aip::Exception("Convert image pixels' number must be equal.");
                }
                auto src_format = SEETA_AIP_IMAGE_FORMAT(src.format);
                auto type_width = image_format_element_width(src_format);
                auto src_data = src.data;
                auto dst_data = dst.data;
                auto src_channels = seeta::aip::ImageData::GetChannels(src_format, src.channels);
                auto dst_channels = seeta::aip::ImageData::GetChannels(src_format, dst.channels);
                if (src_channels != dst_channels) {
                    throw seeta::aip::Exception("Convert images' channels must be equal with format are same.");
                }
                std::memcpy(dst_data, src_data, SRC_N * src_channels * type_width);
                return;
            }
            // NOTE(review): the 0x80000 bit in the high halfword appears to flag
            // a CHW-layout variant of the base format in the low halfword —
            // confirm against the SEETA_AIP_IMAGE_FORMAT declaration.
            if ((src.format & 0xffff0000) == 0x80000) {
                // CHW format: permute to HWC into a temporary, then convert.
                seeta::aip::ImageData tmp(
                        SEETA_AIP_IMAGE_FORMAT(src.format & 0x0000ffff),
                        src.number, src.width, src.height, src.channels);
                uint32_t chw_shape[] = {src.number, src.channels, src.height, src.width };
                _::permute4d(tmp.type(), tmp.data(), src.data, chw_shape, 0, 2, 3, 1);
                convert(threads, tmp, dst);
                return;
            }
            if ((dst.format & 0xffff0000) == 0x80000) {
                // CHW format: convert into an HWC temporary, then permute to CHW.
                seeta::aip::ImageData tmp(
                        SEETA_AIP_IMAGE_FORMAT(dst.format & 0x0000ffff),
                        dst.number, dst.width, dst.height, dst.channels);
                convert(threads, src, tmp);
                uint32_t hwc_shape[] = {dst.number, dst.height, dst.width, dst.channels };
                _::permute4d(tmp.type(), dst.data, tmp.data(), hwc_shape, 0, 3, 1, 2);
                return;
            }
            auto SRC_N = src.number * src.height * src.width;
            auto DST_N = dst.number * dst.height * dst.width;
            if (SRC_N != DST_N) {
                throw seeta::aip::Exception("Convert image pixels' number must be equal.");
            }
            auto src_format = SEETA_AIP_IMAGE_FORMAT(src.format);
            auto dst_format = SEETA_AIP_IMAGE_FORMAT(dst.format);
            auto src_type = seeta::aip::ImageData::GetType(src_format);
            auto dst_type = seeta::aip::ImageData::GetType(dst_format);
            auto src_data = src.data;
            auto dst_data = dst.data;
            auto src_channels = seeta::aip::ImageData::GetChannels(src_format, src.channels);
            auto dst_channels = seeta::aip::ImageData::GetChannels(dst_format, dst.channels);
            // step1. convert dtype
            std::shared_ptr<char> buffer;
            if (src_type != dst_type) {
                if (src_channels == dst_channels) {
                    // for output channels same with input channels: cast straight
                    // into dst and do the (possible) channel swap in place.
                    cast(threads, src.data, src_type, dst.data, dst_type, SRC_N * src_channels, data_scale);
                    src_type = dst_type;
                    src_data = dst.data;
                    src_format = _::casted_format(src_format, dst_type);
                } else {
                    // use buffer to convert data
                    buffer.reset(new char[SRC_N * src_channels * _::value_type_width(dst_type)],
                                 std::default_delete<char[]>());
                    cast(threads, src.data, src_type, buffer.get(), dst_type, SRC_N * src_channels, data_scale);
                    src_type = dst_type;
                    src_data = buffer.get();
                    src_format = _::casted_format(src_format, dst_type);
                }
            }
            // step2. swap channels
            switch (dst_type) {
                default:
                    break;
                case SEETA_AIP_VALUE_BYTE:
                    _::convert_u8image(threads, src_data, dst_data,
                                       src_channels, dst_channels,
                                       src_format, dst_format,
                                       SRC_N);
                    break;
                case SEETA_AIP_VALUE_INT32:
                    _::convert_i32image(threads, src_data, dst_data,
                                        src_channels, dst_channels,
                                        src_format, dst_format,
                                        SRC_N);
                    break;
                case SEETA_AIP_VALUE_FLOAT32:
                    _::convert_f32image(threads, src_data, dst_data,
                                        src_channels, dst_channels,
                                        src_format, dst_format,
                                        SRC_N);
                    break;
            }
        }

        /**
         * Use `threads` to convert image to `format`, returning a new buffer.
         * @param threads using threads (OpenMP)
         * @param format wanted format
         * @param image original image
         * @param data_scale applied when casting from a floating-point dtype;
         *        the image values are multiplied by `data_scale`.
         * @return converted image
         */
        static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, SeetaAIPImageData image,
                                             float data_scale = 255.0) {
            seeta::aip::ImageData converted(format,
                                            image.number, image.width, image.height,
                                            seeta::aip::ImageData::GetChannels(format, image.channels));
            convert(threads, image, SeetaAIPImageData(converted), data_scale);
            return converted;
        }

        /**
         * Use `threads` to convert image to `format`, returning a new buffer.
         * @param threads using threads (OpenMP)
         * @param format wanted format
         * @param image original image
         * @param data_scale applied when casting from a floating-point dtype;
         *        the image values are multiplied by `data_scale`.
         * @return converted image
         */
        static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format,
                                             const seeta::aip::ImageData &image,
                                             float data_scale = 255.0) {
            return convert(threads, format, SeetaAIPImageData(image), data_scale);
        }

        /**
         * Use `threads` to convert image to `format` with aligned storage.
         * @param threads using threads (OpenMP)
         * @param format wanted format
         * @param align memory alignment of the result buffer
         * @param image original image
         * @param data_scale applied when casting from a floating-point dtype;
         *        the image values are multiplied by `data_scale`.
         * @return converted image
         */
        static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, const ImageAlign &align,
                                             SeetaAIPImageData image,
                                             float data_scale = 255.0) {
            seeta::aip::ImageData converted(format, align,
                                            image.number, image.width, image.height,
                                            seeta::aip::ImageData::GetChannels(format, image.channels));
            convert(threads, image, SeetaAIPImageData(converted), data_scale);
            return converted;
        }

        /**
         * Use `threads` to convert image to `format` with aligned storage.
         * @param threads using threads (OpenMP)
         * @param format wanted format
         * @param align memory alignment of the result buffer
         * @param image original image
         * @param data_scale applied when casting from a floating-point dtype;
         *        the image values are multiplied by `data_scale`.
         * @return converted image
         */
        static seeta::aip::ImageData convert(int threads, SEETA_AIP_IMAGE_FORMAT format, const ImageAlign &align,
                                             const seeta::aip::ImageData &image,
                                             float data_scale = 255.0) {
            return convert(threads, format, align, SeetaAIPImageData(image), data_scale);
        }
    }
}

#endif //SEETA_AIP_SEETA_AIP_IMAGE_H
test_core.c
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2017 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * RELIC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with RELIC. If not, see <http://www.gnu.org/licenses/>. */ /** * @file * * Tests for configuration management. * * @ingroup test */ #include <stdio.h> #include "relic.h" #include "relic_test.h" #if MULTI == PTHREAD void *master(void *ptr) { int *code = (int *)ptr; core_init(); THROW(ERR_NO_MEMORY); if (err_get_code() != STS_ERR) { *code = STS_ERR; } else { *code = STS_OK; } core_clean(); return NULL; } void *tester(void *ptr) { int *code = (int *)ptr; core_init(); if (err_get_code() != STS_OK) { *code = STS_ERR; } else { *code = STS_OK; } core_clean(); return NULL; } #endif int main(void) { int code = STS_ERR; /* Initialize library with default configuration. */ if (core_init() != STS_OK) { core_clean(); return 1; } util_banner("Tests for the CORE module:\n", 0); TEST_ONCE("the library context is consistent") { TEST_ASSERT(core_get() != NULL, end); } TEST_END; TEST_ONCE("switching the library context is correct") { ctx_t new_ctx, *old_ctx; /* Backup the old context. */ old_ctx = core_get(); /* Switch the library context. */ core_set(&new_ctx); /* Reinitialize library with new context. 
*/ core_init(); /* Run function to manipulate the library context. */ THROW(ERR_NO_MEMORY); core_set(old_ctx); TEST_ASSERT(err_get_code() == STS_OK, end); core_set(&new_ctx); TEST_ASSERT(err_get_code() == STS_ERR, end); /* Now we need to finalize the new context. */ core_clean(); /* And restore the original context. */ core_set(old_ctx); } TEST_END; code = STS_OK; #if MULTI == OPENMP TEST_ONCE("library context is thread-safe") { omp_set_num_threads(CORES); #pragma omp parallel shared(code) { if (omp_get_thread_num() == 0) { THROW(ERR_NO_MEMORY); if (err_get_code() != STS_ERR) { code = STS_ERR; } } else { core_init(); if (err_get_code() != STS_OK) { code = STS_ERR; } core_clean(); } } TEST_ASSERT(code == STS_OK, end); } TEST_END; #endif #if MULTI == PTHREAD TEST_ONCE("library context is thread-safe") { pthread_t thread[CORES]; int result[CORES] = { STS_OK }; for (int i = 0; i < CORES; i++) { if (i == 0) { if (pthread_create(&(thread[0]), NULL, master, &(result[0]))) { code = STS_ERR; } } else { if (pthread_create(&(thread[i]), NULL, tester, &(result[i]))) { code = STS_ERR; } } if (result[i] != STS_OK) { code = STS_ERR; } } for (int i = 0; i < CORES; i++) { if (pthread_join(thread[i], NULL)) { code = STS_ERR; } } TEST_ASSERT(code == STS_OK, end); } TEST_END; #endif util_banner("All tests have passed.\n", 0); end: core_clean(); return code; }
7493.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
struct.c
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-newDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.i applies to all of s. 
// CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(tofrom: s.i) \ map(ompx_hold,tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. 
// ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(to: s.i, s.j) { // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) #pragma omp target data map(tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(ompx_hold, to: s.i, s.j) { // No transfer here even though parent's HoldRefCount=1. 
// CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } #pragma omp target exit data map(from: s.i, s.j) // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
GB_unop__log_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log_fc64_fc64 // op(A') function: GB_unop_tran__log_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = clog (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = clog (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = clog (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = 
aij ; Cx [p] = clog (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dipoledipoleinteraction.h
/*
 * Copyright 2009-2020 The VOTCA Development Team (http://www.votca.org)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#pragma once
#ifndef VOTCA_XTP_DIPOLEDIPOLEINTERACTION_H
#define VOTCA_XTP_DIPOLEDIPOLEINTERACTION_H

// Local VOTCA includes
#include "eeinteractor.h"
#include "eigen.h"

namespace votca {
namespace xtp {
class DipoleDipoleInteraction;
}
}  // namespace votca

namespace Eigen {
namespace internal {
// DipoleDipoleInteraction acts like a matrix, so it inherits the traits of a
// dense Eigen matrix; this lets Eigen's expression machinery accept it.
template <>
struct traits<votca::xtp::DipoleDipoleInteraction>
    : public Eigen::internal::traits<Eigen::MatrixXd> {};
}  // namespace internal
}  // namespace Eigen

namespace votca {
namespace xtp {

// Matrix-free representation of the (3N x 3N) dipole-dipole interaction
// matrix over N polar sites. Diagonal 3x3 blocks are the inverse
// polarizabilities (getPInv()); off-diagonal 3x3 blocks come from
// interactor_.FillTholeInteraction(). Elements are computed on the fly, so
// the full matrix is never stored; the class plugs into Eigen's iterative
// solvers via the EigenBase/ generic_product_impl hooks below.
class DipoleDipoleInteraction
    : public Eigen::EigenBase<DipoleDipoleInteraction> {
 public:
  // Required typedefs, constants, and method:
  using Scalar = double;
  using RealScalar = double;
  using StorageIndex = votca::Index;
  enum {
    ColsAtCompileTime = Eigen::Dynamic,
    MaxColsAtCompileTime = Eigen::Dynamic,
    IsRowMajor = false
  };

  // Flattens the segments into a list of site pointers; size_ is the linear
  // dimension (3 coordinates per site). The sites must outlive this object
  // (raw pointers are stored, no ownership taken).
  DipoleDipoleInteraction(const eeInteractor& interactor,
                          const std::vector<PolarSegment>& segs)
      : interactor_(interactor) {
    size_ = 0;
    for (const PolarSegment& seg : segs) {
      size_ += 3 * seg.size();
    }
    sites_.reserve(size_ / 3);
    for (const PolarSegment& seg : segs) {
      for (const PolarSite& site : seg) {
        sites_.push_back(&site);
      }
    }
  }

  // Iterator over one column `id`, as required by some Eigen algorithms;
  // walks all rows of that column and evaluates each element on demand.
  class InnerIterator {
   public:
    InnerIterator(const DipoleDipoleInteraction& xpr, const Index& id)
        : xpr_(xpr), id_(id){};

    InnerIterator& operator++() {
      row_++;
      return *this;
    }
    operator bool() const {
      return row_ < xpr_.size_;
    }  // Do not use the size() method here: it returns linear
       // dimension * linear dimension, i.e. size_^2.
    double value() const { return xpr_(row_, id_); }
    Index row() const { return row_; }
    Index col() const { return id_; }
    Index index() const { return row(); }

   private:
    const DipoleDipoleInteraction& xpr_;
    const Index id_;
    Index row_ = 0;
  };

  Index rows() const { return this->size_; }
  Index cols() const { return this->size_; }
  Index outerSize() const { return this->size_; }

  // Lazy matrix-vector product expression; the actual work happens in
  // scaleAndAddTo() below, which calls multiply().
  template <typename Vtype>
  Eigen::Product<DipoleDipoleInteraction, Vtype, Eigen::AliasFreeProduct>
  operator*(const Eigen::MatrixBase<Vtype>& x) const {
    return Eigen::Product<DipoleDipoleInteraction, Vtype,
                          Eigen::AliasFreeProduct>(*this, x.derived());
  }

  // Element access A(i,j). This is not a fast method: off-diagonal blocks
  // recompute the full 3x3 Thole interaction just to read one entry.
  double operator()(const Index i, const Index j) const {
    Index seg1id = Index(i / 3);   // site index of row
    Index xyz1 = Index(i % 3);     // Cartesian component of row
    Index seg2id = Index(j / 3);   // site index of column
    Index xyz2 = Index(j % 3);     // Cartesian component of column
    if (seg1id == seg2id) {
      return sites_[seg1id]->getPInv()(xyz1, xyz2);
    } else {
      const PolarSite& site1 = *sites_[seg1id];
      const PolarSite& site2 = *sites_[seg2id];
      return interactor_.FillTholeInteraction(site1, site2)(xyz1, xyz2);
    }
  };

  // Dense matrix-vector product result = A * v, exploiting that each
  // off-diagonal 3x3 block is reused for the (i,j) and (j,i) contributions
  // (block transposed), so each pair is computed only once.
  // NOTE(review): the reduction(+:result) on an Eigen::VectorXd requires an
  // OpenMP user-defined reduction — presumably declared in eigen.h; verify.
  Eigen::VectorXd multiply(const Eigen::VectorXd& v) const {
    assert(v.size() == size_ &&
           "input vector has the wrong size for multiply with operator");
    const Index segment_size = Index(sites_.size());
    Eigen::VectorXd result = Eigen::VectorXd::Zero(size_);
#pragma omp parallel for schedule(dynamic) reduction(+ : result)
    for (Index i = 0; i < segment_size; i++) {
      const PolarSite& site1 = *sites_[i];
      // Diagonal block contribution.
      result.segment<3>(3 * i) += site1.getPInv() * v.segment<3>(3 * i);
      // Upper triangle; the transposed block covers the lower triangle.
      for (Index j = i + 1; j < segment_size; j++) {
        const PolarSite& site2 = *sites_[j];
        Eigen::Matrix3d block = interactor_.FillTholeInteraction(site1, site2);
        result.segment<3>(3 * i) += block * v.segment<3>(3 * j);
        result.segment<3>(3 * j) += block.transpose() * v.segment<3>(3 * i);
      }
    }
    return result;
  }

 private:
  const eeInteractor& interactor_;       // computes off-diagonal 3x3 blocks
  std::vector<const PolarSite*> sites_;  // non-owning, flattened site list
  Index size_;                           // linear dimension = 3 * #sites
};
}  // namespace xtp
}  // namespace votca

namespace Eigen {
namespace internal {

// Replacement of the mat*vect operation: routes Eigen's GEMV product for
// DipoleDipoleInteraction through multiply() above.
template <typename Vtype>
struct generic_product_impl<votca::xtp::DipoleDipoleInteraction, Vtype,
                            DenseShape, DenseShape, GemvProduct>
    : generic_product_impl_base<
          votca::xtp::DipoleDipoleInteraction, Vtype,
          generic_product_impl<votca::xtp::DipoleDipoleInteraction, Vtype>> {

  typedef typename Product<votca::xtp::DipoleDipoleInteraction, Vtype>::Scalar
      Scalar;

  template <typename Dest>
  static void scaleAndAddTo(Dest& dst,
                            const votca::xtp::DipoleDipoleInteraction& op,
                            const Vtype& v, const Scalar& alpha) {
    // returns dst = alpha * op * v
    // alpha must be 1 here (scaled products are not supported)
    assert(alpha == Scalar(1) && "scaling is not implemented");
    EIGEN_ONLY_USED_FOR_DEBUG(alpha);
    Eigen::VectorXd temp = op.multiply(v);
    dst = temp.cast<Scalar>();  // tumbleweed fix do not delete
  }
};
}  // namespace internal
}  // namespace Eigen

#endif  // VOTCA_XTP_DIPOLEDIPOLEINTERACTION_H
bclevd2.c
#include "laev2.h" #include "wnrme.h" #include "rnd.h" #include "timer.h" int main(int argc, char *argv[]) { (void)set_cbwr(); if (4 != argc) { (void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv); return EXIT_FAILURE; } const size_t n = ((size_t)1u << atoz(argv[2u])); int th = 0; #ifdef _OPENMP th = omp_get_max_threads(); if (n % th) { (void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th); return EXIT_FAILURE; } #endif /* _OPENMP */ const size_t b = atoz(argv[3u]); if (!b) return EXIT_SUCCESS; const size_t nl = strlen(argv[1u]), nl1 = (nl + 1u); char *const fn = calloc((nl + 3u), sizeof(char)); assert(fn); strcpy(fn, argv[1u])[nl] = '.'; int fm = O_RDONLY; #ifdef _LARGEFILE64_SOURCE fm |= O_LARGEFILE; #endif /* _LARGEFILE64_SOURCE */ fn[nl1] = 'k'; const int fk = open(fn, fm); if (-1 >= fk) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'l'; const int fl = open(fn, fm); if (-1 >= fl) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'f'; const int ff = open(fn, fm); if (-1 >= ff) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'g'; const int fg = open(fn, fm); if (-1 >= fg) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'r'; const int fr = open(fn, fm); if (-1 >= fr) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'j'; const int fj = open(fn, fm); if (-1 >= fj) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } const size_t nt = n * sizeof(float); float *const a11 = (float*)aligned_alloc(sizeof(float), nt), *const a22 = (float*)aligned_alloc(sizeof(float), nt), *const a21r = (float*)aligned_alloc(sizeof(float), nt), *const a21i = (float*)aligned_alloc(sizeof(float), nt), *const cs1 = (float*)aligned_alloc(sizeof(float), nt), *const snr = 
(float*)aligned_alloc(sizeof(float), nt), *const sni = (float*)aligned_alloc(sizeof(float), nt), *const l1 = (float*)aligned_alloc(sizeof(float), nt), *const l2 = (float*)aligned_alloc(sizeof(float), nt); assert(a11); assert(a22); assert(a21r); assert(a21i); assert(cs1); assert(snr); assert(sni); assert(l1); assert(l2); wide *const w = (wide*)aligned_alloc(sizeof(wide), (n * sizeof(wide))); assert(w); unsigned rd[2u] = { 0u, 0u }; const uint64_t hz = tsc_get_freq_hz_(rd); (void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]); (void)fflush(stderr); (void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n"); (void)fflush(stdout); const char *bf = (const char*)NULL; if (b <= 10u) bf = "%1zu"; else if (b <= 100u) bf = "%2zu"; else if (b <= 1000u) bf = "%3zu"; else // b > 1000 bf = "%zu"; const size_t n_t = n / imax(th, 1); const size_t cnt = n_t * sizeof(float); char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' }; for (size_t j = 0u; j < b; ++j) { (void)fprintf(stdout, bf, j); (void)fflush(stdout); const size_t jn = j * n; #ifdef _OPENMP #pragma omp parallel default(none) shared(ff,fg,fr,fj,a11,a22,a21r,a21i,n,n_t,cnt,jn) #endif /* _OPENMP */ { const int mt = #ifdef _OPENMP omp_get_thread_num() #else /* !_OPENMP */ 0 #endif /* ?_OPENMP */ ; const size_t tnt = mt * n_t; const off_t off = (jn + tnt) * sizeof(float); if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fr, (a21r + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fj, (a21i + tnt), cnt, off)) exit(EXIT_FAILURE); } (void)fprintf(stdout, ","); (void)fflush(stdout); uint64_t be[2u] = { UINT64_C(0), UINT64_C(0) }; be[0u] = rdtsc_beg(rd); #ifdef _OPENMP #pragma omp parallel for default(none) 
shared(n,a11,a22,a21r,a21i,l1,l2,cs1,snr,sni) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) _claev2((a11 + i), (a21r + i), (a21i + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (snr + i), (sni + i)); be[1u] = rdtsc_end(rd); (void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u])); (void)fflush(stdout); wide o = W_ZERO, r = W_ZERO; #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,a11,a22,a21r,a21i,cs1,snr,sni,l1,l2,w) reduction(max:o,r) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) { wide AE = W_ZERO, AN = W_ZERO; o = fmaxw(o, (w[i] = worc(cs1[i], snr[i], sni[i]))); r = fmaxw(r, wrec(a11[i], a22[i], a21r[i], a21i[i], cs1[i], snr[i], sni[i], l1[i], l2[i], &AE, &AN)); } (void)fprintf(stdout, "%s,", xtoa(a, (long double)o)); (void)fprintf(stdout, "%s", xtoa(a, (long double)r)); (void)fflush(stdout); size_t ix = n; #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,o,w) reduction(min:ix) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) if (w[i] == o) ix = i; (void)fprintf(stderr, "%zu,%zu,%s;", j, ix, xtoa(a, (long double)o)); (void)fprintf(stderr, "%s,", xtoa(a, a11[ix])); (void)fprintf(stderr, "%s,", xtoa(a, a22[ix])); (void)fprintf(stderr, "(%s,", xtoa(a, a21r[ix])); (void)fprintf(stderr, "%s);", xtoa(a, a21i[ix])); (void)fprintf(stderr, "%s,", xtoa(a, cs1[ix])); (void)fprintf(stderr, "(%s,", xtoa(a, snr[ix])); (void)fprintf(stderr, "%s);", xtoa(a, sni[ix])); (void)fprintf(stderr, "%s,", xtoa(a, l1[ix])); (void)fprintf(stderr, "%s\n", xtoa(a, l2[ix])); (void)fflush(stderr); #ifdef _OPENMP #pragma omp parallel default(none) shared(fk,fl,snr,sni,n,n_t,cnt,jn) #endif /* _OPENMP */ { const int mt = #ifdef _OPENMP omp_get_thread_num() #else /* !_OPENMP */ 0 #endif /* ?_OPENMP */ ; const size_t tnt = mt * n_t; const off_t off = (jn + tnt) * sizeof(float); if ((ssize_t)cnt != pread(fk, (snr + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fl, (sni + tnt), cnt, off)) exit(EXIT_FAILURE); } (void)fprintf(stdout, 
","); (void)fflush(stdout); wide x = W_ZERO, m = W_ZERO; r = W_ZERO; #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,l1,l2,snr,sni) reduction(max:r,x,m) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) { wide AE = W_ZERO, AN = W_ZERO; const wide RE = wlam(l1[i], l2[i], snr[i], sni[i], &AE, &AN); r = fmaxw(r, RE); x = fmaxw(x, AE); m = fmaxw(m, AN); } (void)fprintf(stdout, "%s,", xtoa(a, (long double)r)); (void)fprintf(stdout, "%s,", xtoa(a, (long double)x)); (void)fprintf(stdout, "%s\n", xtoa(a, (long double)m)); (void)fflush(stdout); } (void)close(fj); (void)close(fr); (void)close(fg); (void)close(ff); (void)close(fl); (void)close(fk); free(w); free(l2); free(l1); free(sni); free(snr); free(cs1); free(a21i); free(a21r); free(a22); free(a11); free(fn); return EXIT_SUCCESS; }
GB_binop__plus_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__plus_fc32 // A.*B function (eWiseMult): GB_AemultB__plus_fc32 // A*D function (colscale): GB_AxD__plus_fc32 // D*A function (rowscale): GB_DxB__plus_fc32 // C+=B function (dense accum): GB_Cdense_accumB__plus_fc32 // C+=b function (dense accum): GB_Cdense_accumb__plus_fc32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_fc32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_fc32 // C=scalar+B GB_bind1st__plus_fc32 // C=scalar+B' GB_bind1st_tran__plus_fc32 // C=A+scalar GB_bind2nd__plus_fc32 // C=A'+scalar GB_bind2nd_tran__plus_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_add (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC32_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC32_add (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FC32 || GxB_NO_PLUS_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__plus_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__plus_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__plus_fc32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__plus_fc32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__plus_fc32 ( GrB_Matrix 
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__plus_fc32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__plus_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = 
NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__plus_fc32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__plus_fc32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = Bx [p] ; Cx [p] = GB_FC32_add (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): 
apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__plus_fc32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = GB_FC32_add (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_add (x, aij) ; \ } GrB_Info GB_bind1st_tran__plus_fc32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_add (aij, y) ; \ } GrB_Info GB_bind2nd_tran__plus_fc32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
2d_array_ptr_v2.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demonstrates a 2-D view onto a flat allocation: `arr` holds two row
 * pointers into the single 4-int buffer `data` (2 ints per row).
 *
 * Fixes vs. the original:
 *  - <stdio.h> was missing, so printf was implicitly declared (a hard
 *    error in modern C).
 *  - allocation results are now checked before use.
 *  - the element stores are performed by exactly one thread inside the
 *    parallel region; previously every thread wrote the same four cells
 *    concurrently, which is a data race (undefined behavior) even though
 *    the stored values were identical.
 * As before, every thread prints the 2x2 matrix.
 */
int main() {
  int* data = malloc(sizeof(int) * 4);
  int** arr = malloc(sizeof(int*) * 2);
  if (data == NULL || arr == NULL) {
    fprintf(stderr, "allocation failed\n");
    free(data);
    free(arr);
    return EXIT_FAILURE;
  }
  arr[0] = data;     /* row 0 -> data[0..1] */
  arr[1] = data + 2; /* row 1 -> data[2..3] */
  #pragma omp parallel
  {
    /* One thread initializes; the implicit barrier at the end of `single`
     * guarantees the values are visible to all threads before printing. */
    #pragma omp single
    {
      arr[0][0] = 0;
      arr[0][1] = 1;
      arr[1][0] = 2;
      arr[1][1] = 3;
    }
    printf("[%d, %d],\n[%d, %d]\n", arr[0][0], arr[0][1],
           arr[1][0], arr[1][1]);
  }
  free(data);
  free(arr);
  return 0;
}
matrixLogGAK.c
#include "matrixLogGAK.h"

/* Global layout parameters shared by the offset helpers below; set by
 * trainGramMatrixExp()/testGramMatrixExp() before the parallel loops run. */
int g_nInstances;      /* row stride of the result matrix */
int g_nLength_train;   /* series length of the training data */
int g_nLength_test;    /* series length of the test data */
int g_nDim;            /* dimensionality of each time step */

/* Flat index of res[i][j] given row stride g_nInstances.
 * NOTE(review): int arithmetic — may overflow for very large matrices. */
int resOffset(int i, int j) {
    return i * g_nInstances + j;
}

/* Flat offset of training instance i in the (nInstances, nLength, nDim) array. */
int seqOffset_train(int i) {
    return i * g_nLength_train * g_nDim;
}

/* Flat offset of test instance i in the (nInstances, nLength, nDim) array. */
int seqOffset_test(int i) {
    return i * g_nLength_test * g_nDim;
}

/* Compute the kernel matrix of a seq with equal length time series data
 *
 * seq is a flat array that contains the data in form nInstances, nLength, nDim
 * nInstances is the amount of instances
 * nLength is the length of the the series
 * nDim is the dimension
 * res is a flat array that contains the kernel matrix in form nInstances x nInstances
 * sigma is the bandwidth parameter for the gauss-ish kernel
 * triangular is the parameter to trade off global alignment vs. optimal alignment, i.e., more diagonal alignment
 *
 * Entry (i,j) is the normalized minus-log kernel:
 *   0.5*(logGAK(i,i) + logGAK(j,j)) - logGAK(i,j);
 * the self-alignments logGAK(i,i) are precomputed once into `cache`.
 */
void trainGramMatrixExp(double *seq, int nInstances, int nLength, int nDim, double *res, double sigma, int triangular) {
    g_nInstances = nInstances;
    g_nLength_train = nLength;
    g_nDim = nDim;
    double *cache = (double *) malloc(nInstances * sizeof(double));
    int j = 0;
    int i = 0;
    if (cache == NULL) {
        /* NOTE(review): error message goes to stdout; stderr would be
         * more conventional. */
        printf("Error! memory not allocated.");
        exit(1);
    }
    // compute GAK with itself (self-alignment cache, one entry per instance)
#pragma omp parallel for private(i)
    for (i = 0; i < nInstances; i++) {
        int seq_i = seqOffset_train(i);
        cache[i] = logGAK((double *) &seq[seq_i], (double *) &seq[seq_i], nLength, nLength, nDim, sigma, triangular);
    }
    /* Upper triangle only (j >= i); each value is mirrored into (j,i).
     * NOTE(review): triangular iteration gives uneven per-row work;
     * schedule(dynamic) might balance better — verify with profiling. */
#pragma omp parallel for private(i,j)
    for (i = 0; i < nInstances; i++) {
        int seq_i = seqOffset_train(i);
        for (j = i; j < nInstances; j++) {
            int seq_j = seqOffset_train(j);
            // compute the global alignment kernel value
            double ga12 = logGAK((double *) &seq[seq_i], (double *) &seq[seq_j], nLength, nLength, nDim, sigma, triangular);
            // compute the normalization factor (self-alignments from cache)
            //double ga11 = logGAK((double*) &seq[seq_i], (double*) &seq[seq_i], nLength, nLength, nDim, sigma, triangular);
            double ga11 = cache[i];
            //double ga22 = logGAK((double*) &seq[seq_j], (double*) &seq[seq_j], nLength, nLength, nDim, sigma, triangular);
            double ga22 = cache[j];
            double nf = 0.5 * (ga11 + ga22);
            double mlnk = nf - ga12;
            int res_ij = resOffset(i, j);
            int res_ji = resOffset(j, i);
            res[res_ij] = mlnk;   /* symmetric matrix: write both halves */
            res[res_ji] = mlnk;
        }
    }
    free(cache);
}

/* Compute the kernel matrix of a seq with equal length time series data
 *
 * train and test are flat arrays that contains the data of shape (nInstances, nLength, nDim)
 * nInstances is the amount of instances
 * nLength is the length of the the series
 * nDim is the dimension
 * res is a flat array that contains the kernel matrix in form nInstances_test x nInstances_train
 * sigma is the bandwidth parameter for the gauss-ish kernel
 * triangular is the parameter to trade off global alignment vs. optimal alignment, i.e., more diagonal alignment
 * sv_indices contain the support vectors from the SVDD model.
 *
 * Only columns listed in sv_indices are filled (row stride of res is
 * nInstances_train, since g_nInstances is set to it); other entries of
 * res are left untouched.
 */
void testGramMatrixExp(double *train, double *test, int nInstances_train, int nInstances_test, int nLength_train, int nLength_test, int nDim, double *res ,double sigma, int triangular, int64_t *sv_indices, int64_t sv_size) {
    g_nInstances = nInstances_train;
    g_nLength_test = nLength_test;
    g_nLength_train = nLength_train;
    g_nDim = nDim;
    int i = 0;
    int l = 0;
    double *cache_train = (double *) malloc(nInstances_train * sizeof(double));
    double *cache_test = (double *) malloc(nInstances_test * sizeof(double));
    if (cache_train == NULL || cache_test == NULL) {
        printf("Error! memory not allocated.");
        exit(1);
    }
    /* compute GAK with itself — self-alignments of the support vectors only.
     * NOTE(review): assumes sv_indices has no duplicates; duplicates would
     * make two iterations write the same cache_train slot concurrently. */
#pragma omp parallel for private(l)
    for (l = 0; l < sv_size; l++) {
        int i = sv_indices[l];
        int seq_i = seqOffset_train(i);
        cache_train[i] = logGAK((double *) &train[seq_i], (double *) &train[seq_i], nLength_train, nLength_train, nDim, sigma, triangular);
    }
    /* Self-alignments of every test instance. */
#pragma omp parallel for private(i)
    for (i = 0; i < nInstances_test; i++) {
        int seq_i = seqOffset_test(i);
        cache_test[i] = logGAK((double *) &test[seq_i], (double *) &test[seq_i], nLength_test, nLength_test, nDim, sigma, triangular);
    }
#pragma omp parallel for private(i,l)
    for (i = 0; i < nInstances_test; i++) {
        int seq_test = seqOffset_test(i);
        for (l = 0; l < sv_size; l++) {
            int j = sv_indices[l];
            int seq_train = seqOffset_train(j);
            // compute the global alignment kernel value
            double ga12 = logGAK((double *) &train[seq_train], (double *) &test[seq_test], nLength_train, nLength_test, nDim, sigma, triangular);
            // compute the normalization factor (self-alignments from the caches)
            //double ga11 = logGAK((double*) &train[seq_train], (double*) &train[seq_train], nLength_train, nLength_train, nDim, sigma, triangular);
            double ga11 = cache_train[j];
            //double ga22 = logGAK((double*) &test[seq_test], (double*) &test[seq_test], nLength_test, nLength_test, nDim, sigma, triangular);
            double ga22 = cache_test[i];
            double nf = 0.5 * (ga11 + ga22);
            double mlnk = nf - ga12;
            int res_k = resOffset(i, j);
            res[res_k] = mlnk;
        }
    }
    free(cache_test);
    free(cache_train);
}
omptest.c
#include "gptl.h"
#include <stdio.h>
#include <omp.h>

extern void sub (int);

/*
 * Smoke test for GPTL timers under OpenMP: time a 2-iteration parallel loop,
 * then verify that per-thread wallclock queries succeed for a thread that ran
 * the "sub" timer (thread 1) and fail for one that does not exist (thread 2).
 * Returns 0 on success, -1 on either unexpected outcome.
 */
int main ()
{
  int ret;
  double value;
  static const char *thisprog = "omptest";

  /* Force exactly two worker threads so thread ids 0 and 1 both run "sub". */
  omp_set_num_threads (2);

  ret = GPTLinitialize ();
  ret = GPTLstart ("main");
  ret = GPTLstart ("omp_loop");
#pragma omp parallel for
  for (int iter = 0; iter < 2; ++iter)
    sub (iter);
  ret = GPTLstop ("omp_loop");
  ret = GPTLstop ("main");

  /* This test should succeed */
  ret = GPTLget_wallclock ("sub", 1, &value);
  if (ret != 0) {
    printf ("%s: GPTLget_wallclock failure for thread 1\n", thisprog);
    return -1;
  }

  /* This test should fail */
  ret = GPTLget_wallclock ("sub", 2, &value);
  if (ret == 0) {
    printf ("%s: GPTLget_wallclock should have failed for thread 2\n", thisprog);
    return -1;
  }
  return 0;
}

/* Worker timed by the "sub" GPTL timer; reports which thread ran it. */
void sub (int iter)
{
  int ret;
  int worker = omp_get_thread_num ();

  ret = GPTLstart ("sub");
  printf ("iter=%d being processed by thread=%d\n", iter, worker);
  ret = GPTLstop ("sub");
}
opencv_util.h
#ifndef _OPENCV_UTIL_H
#define _OPENCV_UTIL_H

#define _CRT_SECURE_NO_WARNINGS 1

#ifdef USE_OPENCV_UTIL
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <random>
#include <filesystem>
#ifndef _HAS_CXX17
#error CXX17
#endif
#include <time.h>

using namespace cv;
using namespace std;

namespace cpp_torch {
	namespace cvutil {

		// Convert an Image (per-pixel r/g/b bytes) to a BGR cv::Mat.
		// (Name keeps the historical "ImgeTocvMat" spelling — it is public API.)
		cv::Mat ImgeTocvMat(const Image* img)
		{
			cv::Mat cvimg(img->height, img->width, CV_8UC3);
			// Signed counter keeps the OpenMP loop canonical and avoids the
			// previous signed/unsigned comparison against a size_t bound.
			const int sz = (int)(img->height * img->width);
#pragma omp parallel for
			for (int k = 0; k < sz; k++)
			{
				const int i = k / img->width;
				const int j = k % img->width;
				cvimg.at<cv::Vec3b>(i, j)[0] = img->data[k].b; //Blue
				cvimg.at<cv::Vec3b>(i, j)[1] = img->data[k].g; //Green
				cvimg.at<cv::Vec3b>(i, j)[2] = img->data[k].r; //Red
			}
			return cvimg;
		}

		// Convert a BGR cv::Mat back into an Image struct.
		Image cvMatToImage(const cv::Mat& cvimg)
		{
			Image im;
			im.height = cvimg.rows;
			im.width = cvimg.cols;
			im.data.resize(im.height * im.width);
			const int sz = (int)(im.height * im.width);
#pragma omp parallel for
			for (int k = 0; k < sz; k++)
			{
				const int i = k / im.width;
				const int j = k % im.width;
				im.data[k].b = cvimg.at<cv::Vec3b>(i, j)[0]; //Blue
				im.data[k].g = cvimg.at<cv::Vec3b>(i, j)[1]; //Green
				im.data[k].r = cvimg.at<cv::Vec3b>(i, j)[2]; //Red
			}
			return im;
		}

		// Resize in place to h rows x w cols.
		// BUG FIX: "cv:resize(...)" parsed as a label `cv:` followed by an
		// unqualified call to this very function — infinite recursion.  The
		// qualified cv::resize is intended.
		void resize(cv::Mat& cvimg, int h, int w)
		{
			cv::resize(cvimg, cvimg, cv::Size(),
				(double)(w) / cvimg.cols,   // fx scales width (cols)
				(double)(h) / cvimg.rows);  // fy scales height (rows)
		}

		// Grow both dimensions by `padding` pixels via scaling.
		// BUG FIX: same cv:: label bug as above, and fy previously divided by
		// cols instead of rows, distorting non-square images.
		void resize(cv::Mat& cvimg, int padding)
		{
			cv::resize(cvimg, cvimg, cv::Size(),
				(double)(cvimg.cols + padding) / cvimg.cols,
				(double)(cvimg.rows + padding) / cvimg.rows);
		}

		// Return a copy of target_mat centered in a zero (black) canvas that
		// is `padding` pixels larger in each dimension.
		cv::Mat padding_img(cv::Mat& target_mat, int padding)
		{
			cv::Mat convert_mat, work_mat;
			work_mat = cv::Mat::zeros(cv::Size(target_mat.cols + padding, target_mat.rows + padding), CV_8UC3);
			convert_mat = target_mat.clone();
			cv::Mat Roi1(work_mat, cv::Rect(
				(target_mat.cols + padding - convert_mat.cols) / 2.0,
				(target_mat.rows + padding - convert_mat.rows) / 2.0,
				convert_mat.cols, convert_mat.rows));
			convert_mat.copyTo(Roi1);
			return work_mat.clone();
		}

		// Convert a CHW tensor (sizes = {C, H, W}) to an 8-bit 3-channel Mat,
		// multiplying by `scale` and clamping to [0, 255].
		// BUG FIXES:
		//  - `scale` was `const int` defaulted from 1.0, silently truncating
		//    fractional scale factors; it is now double (int callers convert).
		//  - a bare `out_tensor.to(torch::kCPU);` discarded its result
		//    (Tensor::to is not in-place); the dead call is removed — the
		//    assigned .to(kCPU) below does the real transfer.
		//  - the copy loops iterated y < sizes[2] and x < sizes[1], i.e. rows
		//    bounded by width and columns by height — out of bounds for any
		//    non-square image.  Bounds are now H for rows, W for cols.
		//  - the CPU/GPU branches were byte-identical, so they are merged.
		cv::Mat tensorToMat(const torch::Tensor &tensor, const double scale = 1.0)
		{
			const auto sizes = tensor.sizes();
			if (sizes.size() > 3)
			{
				printf("%d %d %d\n", (int)sizes[0], (int)sizes[1], (int)sizes[2]);
				throw error_exception("tensorToMat input size error.");
			}
			torch::Tensor out_tensor = tensor.detach();
			out_tensor = out_tensor.squeeze().detach().permute({ 1,2,0 }); // CHW -> HWC
			out_tensor = out_tensor.mul(scale).clamp(0, 255).to(torch::kU8);
			out_tensor = out_tensor.to(torch::kCPU);

			cv::Mat resultImg(sizes[1], sizes[2], CV_8UC3); // H rows x W cols
#pragma omp parallel for
			for (int y = 0; y < sizes[1]; ++y)
			{
				for (int x = 0; x < sizes[2]; ++x)
				{
					for (int c = 0; c < sizes[0]; ++c)
					{
						resultImg.at<cv::Vec3b>(y, x)[c] = out_tensor[y][x][c].template item<float_t>();
					}
				}
			}
			// For libtorch <= 1.60 a direct memcpy of out_tensor.data_ptr()
			// into resultImg.data is an alternative fast path.
			cv::cvtColor(resultImg, resultImg, cv::COLOR_BGR2RGB);
			return resultImg;
		}

		// Render a tensor to an image file; returns the written Mat.
		// BUG FIX: the result was bound to a non-const cv::Mat& referencing a
		// temporary (ill-formed C++; MSVC extension only) — hold it by value.
		cv::Mat TensorToImageFile(torch::Tensor image_tensor, const std::string& filename, const double scale = 1.0)
		{
			cv::Mat cv_mat = tensorToMat(image_tensor, scale);
			cv::imwrite(filename, cv_mat);
			return cv_mat;
		}

		// Tile up to M x N images from a batch tensor into one mosaic and
		// write it to image_file_name.  Optional padding frames each tile.
		cv::Mat ImageWrite(const torch::Tensor batch_tensor, int M, int N, const std::string& image_file_name, int padding = 0)
		{
			int count = batch_tensor.sizes()[0]; // batch size
			int k = 0;                           // next image index
			cv::Mat row_strip;                   // current row of tiles
			cv::Mat mosaic;                      // accumulated rows
			for (int i = 0; i < M; i++)
			{
				for (int j = 0; j < N; j++)
				{
					if (k >= count) break;
					cv::Mat im = tensorToMat(batch_tensor[k], 1.0);
					if (padding)
					{
						im = padding_img(im, padding);
					}
					if (j == 0) row_strip = im;
					else cv::hconcat(row_strip, im, row_strip);
					k += 1;
				}
				if (i == 0) mosaic = row_strip;
				else cv::vconcat(mosaic, row_strip, mosaic);
				if (k >= count) break;
			}
			if (padding)
			{
				mosaic = padding_img(mosaic, padding);
			}
			cv::imwrite(image_file_name, mosaic);
			return mosaic;
		}

		// Tile up to M x N image files found in `path` (bmp/jpg/jpeg/png,
		// case-insensitive extensions) into one mosaic written to
		// image_file_name.  Directory iteration order determines placement.
		cv::Mat ImageWrite(const std::string& path, int M, int N, const std::string& image_file_name, int padding = 0)
		{
			std::vector<string> flist;
			std::filesystem::path p(path);

			for_each(std::filesystem::directory_iterator(p), std::filesystem::directory_iterator(),
				[&flist](const std::filesystem::path& p) {
				if (std::filesystem::is_regular_file(p)) {
					string s = p.string();
					if (
						strstr(s.c_str(), ".bmp") == NULL &&
						strstr(s.c_str(), ".BMP") == NULL &&
						strstr(s.c_str(), ".jpg") == NULL &&
						strstr(s.c_str(), ".JPG") == NULL &&
						strstr(s.c_str(), ".jpeg") == NULL &&
						strstr(s.c_str(), ".JPEG") == NULL &&
						strstr(s.c_str(), ".png") == NULL &&
						strstr(s.c_str(), ".PNG") == NULL
						)
					{
						/* skip non-image files */
					}
					else
					{
						flist.push_back(s);
					}
				}
			});
			// BUG FIX: %d with a size_t argument was undefined behavior.
			printf("%zu\n", flist.size());

			int count = (int)flist.size();
			int k = 0;
			cv::Mat row_strip;
			cv::Mat mosaic;
			for (int i = 0; i < M; i++)
			{
				for (int j = 0; j < N; j++)
				{
					if (k >= count) break;
					cv::Mat im = imread(flist[k]);
					if (padding)
					{
						im = padding_img(im, padding);
					}
					if (j == 0) row_strip = im;
					else cv::hconcat(row_strip, im, row_strip);
					k += 1;
				}
				if (i == 0) mosaic = row_strip;
				else cv::vconcat(mosaic, row_strip, mosaic);
				if (k >= count) break;
			}
			if (padding)
			{
				mosaic = padding_img(mosaic, padding);
			}
			cv::imwrite(image_file_name, mosaic);
			return mosaic;
		}
	}
}
#endif

#endif
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) { for (t4=max(max(max(0,ceild(3*t1-255,256)),ceild(24*t2-Nz-1020,1024)),ceild(16*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(12*t1+Nx+21,1024)),floord(24*t2+Nx+20,1024)),floord(16*t3+Nx+12,1024)),floord(24*t1-24*t2+Nz+Nx+19,1024));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),1024*t4+1022),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, 
tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
kmp_sch_simd_runtime_static.c
// RUN: %libomp-compile && %libomp-run
// RUN: %libomp-run 1 && %libomp-run 2
// REQUIRES: openmp-4.5

// The test checks schedule(simd:runtime)
// in combination with OMP_SCHEDULE=static[,chunk]
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // BUG FIX: strlen/strcpy/strcat were called without a prototype
#include <omp.h>

#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#define seten(a,b,c) _putenv_s((a),(b))
#else
#include <unistd.h>
#define delay() usleep(10);
#define seten(a,b,c) setenv((a),(b),(c))
#endif

#define SIMD_LEN 4
int err = 0;

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL.
enum sched {
  kmp_sch_static_balanced_chunked = 45,
  kmp_sch_guided_simd = 46,
  kmp_sch_runtime_simd = 47,
};
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
typedef struct {
  int reserved_1;
  int flags;
  int reserved_2;
  int reserved_3;
  char *psource;
} id;

#ifdef __cplusplus
extern "C" {
#endif
  int __kmpc_global_thread_num(id*);
  void __kmpc_barrier(id*, int gtid);
  void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
  void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
  int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
  int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
#ifdef __cplusplus
} // extern "C"
#endif
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};

// ---------------------------------------------------------------------------
// Drive the dispatch loop directly through the RTL entry points and validate
// every chunk the runtime hands back: bounds, stride, chunk-size multiples,
// and that only the final chunk may be undersized.  Errors are counted in the
// global `err`.
void run_loop(
    int loop_lb,   // Loop lower bound.
    int loop_ub,   // Loop upper bound.
    int loop_st,   // Loop stride.
    int lchunk) {
  static int volatile loop_sync = 0;
  int lb;   // Chunk lower bound.
  int ub;   // Chunk upper bound.
  int st;   // Chunk stride.
  int nthreads = omp_get_num_threads();
  int tid = omp_get_thread_num();
  int gtid = __kmpc_global_thread_num(&loc);
  int last;
  int ch;
  int no_chunk = 0;
  if (lchunk == 0) {
    no_chunk = 1;
    lchunk = 1;
  }
  ch = lchunk * SIMD_LEN;
#if _DEBUG > 1
  printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n",
         gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk);
#endif
  // Don't test degenerate cases that should have been discovered by codegen.
  if (loop_st == 0)
    return;
  if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
    return;
  __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd,
                         loop_lb, loop_ub, loop_st, SIMD_LEN);
  { // Let the master thread handle the chunks alone.
    int chunk;    // No of current chunk.
    int last_ub;  // Upper bound of the last processed chunk.
    u64 cur;      // Number of interations in current chunk.
    u64 max;      // Max allowed iterations for current chunk.
    int undersized = 0;

    last_ub = loop_ub;
    chunk = 0;
    max = (loop_ub - loop_lb) / loop_st + 1;
    // The first chunk can consume all iterations.
    while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
      ++chunk;
#if _DEBUG
      printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n",
             tid, chunk, (int)lb, (int)ub, (int)(ub - lb + 1));
#endif
      // Check if previous chunk (it is not the final chunk) is undersized.
      if (undersized)
        printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err);
      if (loop_st > 0) {
        // Upper bound should not exceed loop upper bound.
        if (!(ub <= loop_ub))
          printf("Error with ub %d, %d, ch %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb <= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      } else {
        if (!(ub >= loop_ub))
          printf("Error with ub %d, %d, %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb >= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      }; // if
      // Stride should not change.
      if (!(st == loop_st))
        printf("Error with st %d, %d, ch %d, err %d\n",
               (int)st, (int)loop_st, chunk, ++err);
      cur = (ub - lb) / loop_st + 1;
      // Guided scheduling uses FP computations, so current chunk may
      // be a bit bigger (+1) than allowed maximum.
      // BUG FIX: cur and max are u64 — printing them through %d was
      // undefined behavior; cast to int like the rest of the file.
      if (!(cur <= max + 1))
        printf("Error with iter %d, %d, err %d\n", (int)cur, (int)max, ++err);
      // Update maximum for the next chunk.
      if (last) {
        if (!no_chunk && cur > ch && nthreads > 1)
          printf("Error: too big last chunk %d (%d), tid %d, err %d\n",
                 (int)cur, ch, tid, ++err);
      } else {
        if (cur % ch)
          printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n",
                 chunk, (int)cur, ch, tid, ++err);
      }
      if (cur < max)
        max = cur;
      last_ub = ub;
      undersized = (cur < ch);
#if _DEBUG > 1
      if (last)
        printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n",
               undersized, (int)cur, ch, tid, ub, lb, loop_st);
#endif
    } // while
    // Must have the right last iteration index.
    if (loop_st > 0) {
      if (!(last_ub <= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st > loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } else {
      if (!(last_ub >= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st < loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } // if
  }
  __kmpc_barrier(&loc, gtid);
} // run_loop

// Set OMP_SCHEDULE from the optional chunk argument, then run the loop in a
// parallel region.  Exit status reflects the global error counter.
int main(int argc, char *argv[]) {
  int chunk = 0;
  if (argc > 1) {
    char *buf = malloc(8 + strlen(argv[1]));
    // expect chunk size as a parameter
    chunk = atoi(argv[1]);
    strcpy(buf, "static,");
    strcat(buf, argv[1]);
    seten("OMP_SCHEDULE", buf, 1);
    printf("Testing schedule(simd:%s)\n", buf);
    free(buf);
  } else {
    seten("OMP_SCHEDULE", "static", 1);
    printf("Testing schedule(simd:static)\n");
  }
#pragma omp parallel// num_threads(num_th)
  run_loop(0, 26, 1, chunk);
  if (err) {
    printf("failed, err = %d\n", err);
    return 1;
  } else {
    printf("passed\n");
    return 0;
  }
}
threadPrivate.c
#include <omp.h>
#include <stdio.h> /* BUG FIX: printf was used without a prototype (implicit
                    * declaration — invalid since C99) */

/* Demonstrates OpenMP threadprivate: `a` and `x` persist per thread across
 * parallel regions (with dynamic threads disabled), while `b` is a plain
 * private copy that does not survive.  `i` is kept for source compatibility
 * although this program does not use it. */
int a, b, i, tid;
float x;

#pragma omp threadprivate(a, x)

/* BUG FIX: implicit-int `main()` is invalid since C99 — declare int main
 * and return an explicit status. */
int main (void)
{
  /* Explicitly turn off dynamic threads so threadprivate values persist
   * between the two parallel regions. */
  omp_set_dynamic(0);

  printf("1st Parallel Region:\n");
#pragma omp parallel private(b,tid)
  {
    tid = omp_get_thread_num();
    a = tid;           /* threadprivate: kept for the 2nd region */
    b = tid;           /* private: discarded at region end */
    x = 1.1 * tid +1.0; /* threadprivate: kept for the 2nd region */
    printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
  } /* end of parallel section */

  printf("************************************\n");
  printf("Master thread doing serial work here\n");
  printf("************************************\n");

  printf("2nd Parallel Region:\n");
#pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    /* a and x retain each thread's values; b holds the master's last value. */
    printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
  } /* end of parallel section */

  return 0;
}
libperf.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED. * Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED. * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <ucs/debug/log.h> #include <ucs/arch/bitops.h> #include <ucs/sys/module.h> #include <ucs/sys/string.h> #include <string.h> #include <tools/perf/lib/libperf_int.h> #include <unistd.h> #if _OPENMP #include <omp.h> #endif /* _OPENMP */ #define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \ _status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \ if (_status != UCS_OK) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \ "message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[_op], (_size)); \ return _status; \ } #define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \ if (!ucs_test_all_flags(_attr, _required)) { \ if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \ ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \ #_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \ (_msg)[ucs_ffs64(~(_attr) & (_required))]); \ } \ return UCS_ERR_UNSUPPORTED; \ } typedef struct { union { struct { size_t dev_addr_len; size_t iface_addr_len; size_t ep_addr_len; } uct; struct { size_t worker_addr_len; size_t total_wireup_len; } ucp; }; size_t rkey_size; unsigned long recv_buffer; } ucx_perf_ep_info_t; const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST]; static const char *perf_iface_ops[] = { [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short", [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy", [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy", [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short", [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy", 
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short", [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy", [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy", [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface", [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep", [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability", [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback", [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_SEND_COMP)] = "send completion event", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV)] = "tag or active message event", [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV_SIG)] = "signaled message event", [ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy", [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy" }; static const char *perf_atomic_op[] = { [UCT_ATOMIC_OP_ADD] = "add", [UCT_ATOMIC_OP_AND] = "and", [UCT_ATOMIC_OP_OR] = "or" , [UCT_ATOMIC_OP_XOR] = "xor" }; static const char *perf_atomic_fop[] = { [UCT_ATOMIC_OP_ADD] = "fetch-add", [UCT_ATOMIC_OP_AND] = "fetch-and", [UCT_ATOMIC_OP_OR] = "fetch-or", [UCT_ATOMIC_OP_XOR] = "fetch-xor", [UCT_ATOMIC_OP_SWAP] = "swap", [UCT_ATOMIC_OP_CSWAP] = "cswap" }; /* * This Quickselect routine is based on the algorithm described in * "Numerical recipes in C", Second Edition, * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5 * This code by Nicolas Devillard - 1998. Public domain. 
*/ static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n) { int low, high ; int median; int middle, ll, hh; #define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; } low = 0 ; high = n-1 ; median = (low + high) / 2; for (;;) { if (high <= low) /* One element only */ return arr[median] ; if (high == low + 1) { /* Two elements only */ if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; return arr[median] ; } /* Find median of low, middle and high items; swap into position low */ middle = (low + high) / 2; if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ; if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ; if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ; /* Swap low item (now in position middle) into position (low+1) */ ELEM_SWAP(arr[middle], arr[low+1]) ; /* Nibble from each end towards middle, swapping items when stuck */ ll = low + 1; hh = high; for (;;) { do ll++; while (arr[low] > arr[ll]) ; do hh--; while (arr[hh] > arr[low]) ; if (hh < ll) break; ELEM_SWAP(arr[ll], arr[hh]) ; } /* Swap middle item (in position low) back into correct position */ ELEM_SWAP(arr[low], arr[hh]) ; /* Re-set active partition */ if (hh <= median) low = ll; if (hh >= median) high = hh - 1; } } static ucs_status_t uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, unsigned flags, uct_allocated_memory_t *alloc_mem) { ucs_status_t status; status = uct_iface_mem_alloc(perf->uct.iface, length, flags, "perftest", alloc_mem); if (status != UCS_OK) { ucs_free(alloc_mem); ucs_error("failed to allocate memory: %s", ucs_status_string(status)); return status; } ucs_assert(alloc_mem->md == perf->uct.md); return UCS_OK; } static void uct_perf_test_free_host(const ucx_perf_context_t *perf, uct_allocated_memory_t *alloc_mem) { uct_iface_mem_free(alloc_mem); } static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type, const void *src, ucs_memory_type_t src_mem_type, size_t count) { if 
((dst_mem_type != UCS_MEMORY_TYPE_HOST) || (src_mem_type != UCS_MEMORY_TYPE_HOST)) { ucs_error("wrong memory type passed src - %d, dst - %d", src_mem_type, dst_mem_type); } else { memcpy(dst, src, count); } } static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; unsigned flags; size_t buffer_size; if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* TODO use params->alignment */ flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ? UCT_MD_MEM_FLAG_NONBLOCK : 0; flags |= UCT_MD_MEM_ACCESS_ALL; /* Allocate send buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.send_mem); if (status != UCS_OK) { goto err; } perf->send_buffer = perf->uct.send_mem.address; /* Allocate receive buffer memory */ status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count, flags, &perf->uct.recv_mem); if (status != UCS_OK) { goto err_free_send; } perf->recv_buffer = perf->uct.recv_mem.address; /* Allocate IOV datatype memory */ perf->params.msg_size_cnt = params->msg_size_cnt; perf->uct.iov = malloc(sizeof(*perf->uct.iov) * perf->params.msg_size_cnt * params->thread_count); if (NULL == perf->uct.iov) { status = UCS_ERR_NO_MEMORY; ucs_error("Failed allocate send IOV(%lu) buffer: %s", perf->params.msg_size_cnt, ucs_status_string(status)); goto err_free_recv; } ucs_debug("allocated memory. 
Send buffer %p, Recv buffer %p", perf->send_buffer, perf->recv_buffer); return UCS_OK; err_free_recv: perf->allocator->uct_free(perf, &perf->uct.recv_mem); err_free_send: perf->allocator->uct_free(perf, &perf->uct.send_mem); err: return status; } static void uct_perf_test_free_mem(ucx_perf_context_t *perf) { perf->allocator->uct_free(perf, &perf->uct.send_mem); perf->allocator->uct_free(perf, &perf->uct.recv_mem); free(perf->uct.iov); } void ucx_perf_test_start_clock(ucx_perf_context_t *perf) { ucs_time_t start_time = ucs_get_time(); perf->start_time_acc = ucs_get_accurate_time(); perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX : ucs_time_from_sec(perf->params.max_time) + start_time; perf->prev_time = start_time; perf->prev.time = start_time; perf->prev.time_acc = perf->start_time_acc; perf->current.time_acc = perf->start_time_acc; } /* Initialize/reset all parameters that could be modified by the warm-up run */ static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned i; perf->max_iter = (perf->params.max_iter == 0) ? 
UINT64_MAX : perf->params.max_iter; perf->report_interval = ucs_time_from_sec(perf->params.report_interval); perf->current.time = 0; perf->current.msgs = 0; perf->current.bytes = 0; perf->current.iters = 0; perf->prev.msgs = 0; perf->prev.bytes = 0; perf->prev.iters = 0; perf->timing_queue_head = 0; for (i = 0; i < TIMING_QUEUE_SIZE; ++i) { perf->timing_queue[i] = 0; } ucx_perf_test_start_clock(perf); } static void ucx_perf_test_init(ucx_perf_context_t *perf, const ucx_perf_params_t *params) { unsigned group_index; perf->params = *params; group_index = rte_call(perf, group_index); if (0 == group_index) { perf->allocator = ucx_perf_mem_type_allocators[params->send_mem_type]; } else { perf->allocator = ucx_perf_mem_type_allocators[params->recv_mem_type]; } ucx_perf_test_prepare_new_run(perf, params); } void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result) { ucs_time_t median; double factor; if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) { factor = 2.0; } else { factor = 1.0; } result->iters = perf->current.iters; result->bytes = perf->current.bytes; result->elapsed_time = perf->current.time_acc - perf->start_time_acc; /* Latency */ median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE); result->latency.typical = ucs_time_to_sec(median) / factor; result->latency.moment_average = (perf->current.time_acc - perf->prev.time_acc) / (perf->current.iters - perf->prev.iters) / factor; result->latency.total_average = (perf->current.time_acc - perf->start_time_acc) / perf->current.iters / factor; /* Bandwidth */ result->bandwidth.typical = 0.0; // Undefined result->bandwidth.moment_average = (perf->current.bytes - perf->prev.bytes) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->bandwidth.total_average = perf->current.bytes / (perf->current.time_acc - perf->start_time_acc) * factor; /* Packet rate */ result->msgrate.typical = 0.0; // Undefined result->msgrate.moment_average = (perf->current.msgs - 
perf->prev.msgs) / (perf->current.time_acc - perf->prev.time_acc) * factor; result->msgrate.total_average = perf->current.msgs / (perf->current.time_acc - perf->start_time_acc) * factor; } static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params) { size_t it; /* check if zero-size messages are requested and supported */ if ((/* they are not supported by: */ /* - UCT tests, except UCT AM Short/Bcopy */ (params->api == UCX_PERF_API_UCT) || (/* - UCP RMA and AMO tests */ (params->api == UCX_PERF_API_UCP) && (params->command != UCX_PERF_CMD_AM) && (params->command != UCX_PERF_CMD_TAG) && (params->command != UCX_PERF_CMD_TAG_SYNC) && (params->command != UCX_PERF_CMD_STREAM))) && ucx_perf_get_message_size(params) < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size too small, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } if ((params->api == UCX_PERF_API_UCP) && ((params->send_mem_type != UCS_MEMORY_TYPE_HOST) || (params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) && ((params->command == UCX_PERF_CMD_PUT) || (params->command == UCX_PERF_CMD_GET) || (params->command == UCX_PERF_CMD_ADD) || (params->command == UCX_PERF_CMD_FADD) || (params->command == UCX_PERF_CMD_SWAP) || (params->command == UCX_PERF_CMD_CSWAP))) { /* TODO: remove when support for non-HOST memory types will be added */ if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types", ucs_memory_type_names[params->send_mem_type], ucs_memory_type_names[params->recv_mem_type]); } return UCS_ERR_INVALID_PARAM; } if (params->max_outstanding < 1) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("max_outstanding, need to be at least 1"); } return UCS_ERR_INVALID_PARAM; } /* check if particular message size fit into stride size */ if (params->iov_stride) { for (it = 0; it < params->msg_size_cnt; ++it) { if (params->msg_size_list[it] > params->iov_stride) { if (params->flags & 
UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Buffer size %lu bigger than stride %lu", params->msg_size_list[it], params->iov_stride); } return UCS_ERR_INVALID_PARAM; } } } return UCS_OK; } void uct_perf_iface_flush_b(ucx_perf_context_t *perf) { ucs_status_t status; do { status = uct_iface_flush(perf->uct.iface, 0, NULL); uct_worker_progress(perf->uct.worker); } while (status == UCS_INPROGRESS); } static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f, uint64_t bcopy_f, uint64_t zcopy_f) { return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_f : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_f : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_f : 0; } static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32, uint64_t *op64, uint64_t op) { if (size == sizeof(uint32_t)) { *op32 = UCS_BIT(op); return UCS_OK; } else if (size == sizeof(uint64_t)) { *op64 = UCS_BIT(op); return UCS_OK; } return UCS_ERR_UNSUPPORTED; } static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m, size_t bcopy_m, uint64_t zcopy_m) { return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m : (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m : (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? 
zcopy_m : 0; } static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params, ucs_memory_type_t mem_type, uct_md_attr_t *md_attr) { if (!(md_attr->cap.access_mem_type == mem_type) && !(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT, ucs_memory_type_names[mem_type], UCT_PERF_TEST_PARAMS_ARG(params)); return UCS_ERR_INVALID_PARAM; } } return UCS_OK; } static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params, uct_iface_h iface, uct_md_h md) { uint64_t required_flags = 0; uint64_t atomic_op32 = 0; uint64_t atomic_op64 = 0; uint64_t atomic_fop32 = 0; uint64_t atomic_fop64 = 0; uct_md_attr_t md_attr; uct_iface_attr_t attr; ucs_status_t status; size_t min_size, max_size, max_iov, message_size; status = uct_md_query(md, &md_attr); if (status != UCS_OK) { ucs_error("uct_md_query(%s) failed: %s", params->uct.md_name, ucs_status_string(status)); return status; } status = uct_iface_query(iface, &attr); if (status != UCS_OK) { ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s", UCT_PERF_TEST_PARAMS_ARG(params), ucs_status_string(status)); return status; } min_size = 0; max_iov = 1; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_AM: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT, UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY); required_flags |= UCT_IFACE_FLAG_CB_SYNC; min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.am.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short, attr.cap.am.max_bcopy, attr.cap.am.max_zcopy); max_iov = attr.cap.am.max_iov; break; case UCX_PERF_CMD_PUT: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT, UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, 
attr.cap.put.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short, attr.cap.put.max_bcopy, attr.cap.put.max_zcopy); max_iov = attr.cap.put.max_iov; break; case UCX_PERF_CMD_GET: required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT, UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY); min_size = __get_max_size(params->uct.data_layout, 0, 0, attr.cap.get.min_zcopy); max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short, attr.cap.get.max_bcopy, attr.cap.get.max_zcopy); max_iov = attr.cap.get.max_iov; break; case UCX_PERF_CMD_ADD: ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD, perf_atomic_op, params, status); max_size = 8; break; case UCX_PERF_CMD_FADD: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_SWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP, perf_atomic_fop, params, status); max_size = 8; break; case UCX_PERF_CMD_CSWAP: ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP, perf_atomic_fop, params, status); max_size = 8; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } /* check atomics first */ ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op); ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op); ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop); ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop); /* check iface flags */ if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) && (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) { if (params->flags & 
UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s", UCT_PERF_TEST_PARAMS_ARG(params), perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]); } return UCS_ERR_UNSUPPORTED; } if (message_size < min_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is smaller than min supported (%zu)", message_size, min_size); } return UCS_ERR_UNSUPPORTED; } if (message_size > max_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Message size (%zu) is larger than max supported (%zu)", message_size, max_size); } return UCS_ERR_UNSUPPORTED; } if (params->command == UCX_PERF_CMD_AM) { if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) && (params->am_hdr_size != sizeof(uint64_t))) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Short AM header size must be 8 bytes"); } return UCS_ERR_INVALID_PARAM; } if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) && (params->am_hdr_size > attr.cap.am.max_hdr)) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than max supported (%zu)", params->am_hdr_size, attr.cap.am.max_hdr); } return UCS_ERR_UNSUPPORTED; } if (params->am_hdr_size > message_size) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%zu) is larger than message size (%zu)", params->am_hdr_size, message_size); } return UCS_ERR_INVALID_PARAM; } if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM flow-control window (%d) too large (should be <= %d)", params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW); } return UCS_ERR_INVALID_PARAM; } if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) && (params->flags & UCX_PERF_TEST_FLAG_VERBOSE)) { ucs_warn("Running active-message test with on-sided progress"); } } if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) { if (params->msg_size_cnt > max_iov) { if 
((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) || !params->msg_size_cnt) { ucs_error("Wrong number of IOV entries. Requested is %lu, " "should be in the range 1...%lu", params->msg_size_cnt, max_iov); } return UCS_ERR_UNSUPPORTED; } /* if msg_size_cnt == 1 the message size checked above */ if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) { if (params->am_hdr_size > params->msg_size_list[0]) { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("AM header size (%lu) larger than the first IOV " "message size (%lu)", params->am_hdr_size, params->msg_size_list[0]); } return UCS_ERR_INVALID_PARAM; } } } status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr); if (status != UCS_OK) { return status; } status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf) { const size_t buffer_size = ADDR_BUF_SIZE; ucx_perf_ep_info_t info, *remote_info; unsigned group_size, i, group_index; uct_device_addr_t *dev_addr; uct_iface_addr_t *iface_addr; uct_ep_addr_t *ep_addr; uct_iface_attr_t iface_attr; uct_md_attr_t md_attr; uct_ep_params_t ep_params; void *rkey_buffer; ucs_status_t status; struct iovec vec[5]; void *buffer; void *req; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("Failed to allocate RTE buffer"); status = UCS_ERR_NO_MEMORY; goto err; } status = uct_iface_query(perf->uct.iface, &iface_attr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status)); goto err_free; } status = uct_md_query(perf->uct.md, &md_attr); if (status != UCS_OK) { ucs_error("Failed to uct_md_query: %s", ucs_status_string(status)); goto err_free; } if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) { info.rkey_size = md_attr.rkey_packed_size; } else { info.rkey_size = 0; } info.uct.dev_addr_len = iface_attr.device_addr_len; 
info.uct.iface_addr_len = iface_attr.iface_addr_len; info.uct.ep_addr_len = iface_attr.ep_addr_len; info.recv_buffer = (uintptr_t)perf->recv_buffer; rkey_buffer = buffer; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len); ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <= UCS_PTR_BYTE_OFFSET(buffer, buffer_size)); status = uct_iface_get_device_address(perf->uct.iface, dev_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_device_address: %s", ucs_status_string(status)); goto err_free; } status = uct_iface_get_address(perf->uct.iface, iface_addr); if (status != UCS_OK) { ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status)); goto err_free; } if (info.rkey_size > 0) { memset(rkey_buffer, 0, info.rkey_size); status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status)); goto err_free; } } group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers)); if (perf->uct.peers == NULL) { goto err_free; } ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE; ep_params.iface = perf->uct.iface; if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); if (status != UCS_OK) { ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status)); goto err_destroy_eps; } status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr); if (status != UCS_OK) { ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status)); goto err_destroy_eps; } } } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.field_mask |= 
UCT_EP_PARAM_FIELD_DEV_ADDR | UCT_EP_PARAM_FIELD_IFACE_ADDR; } vec[0].iov_base = &info; vec[0].iov_len = sizeof(info); vec[1].iov_base = buffer; vec[1].iov_len = info.rkey_size + info.uct.dev_addr_len + info.uct.iface_addr_len + info.uct.ep_addr_len; rte_call(perf, post_vec, vec, 2, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { if (i == group_index) { continue; } rte_call(perf, recv, i, buffer, buffer_size, req); remote_info = buffer; rkey_buffer = remote_info + 1; dev_addr = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size); iface_addr = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len); ep_addr = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len); perf->uct.peers[i].remote_addr = remote_info->recv_buffer; if (!uct_iface_is_reachable(perf->uct.iface, dev_addr, remote_info->uct.iface_addr_len ? iface_addr : NULL)) { ucs_error("Destination is unreachable"); status = UCS_ERR_UNREACHABLE; goto err_destroy_eps; } if (remote_info->rkey_size > 0) { status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer, &perf->uct.peers[i].rkey); if (status != UCS_OK) { ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status)); goto err_destroy_eps; } } else { perf->uct.peers[i].rkey.handle = NULL; perf->uct.peers[i].rkey.rkey = UCT_INVALID_RKEY; } if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) { status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr); } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) { ep_params.dev_addr = dev_addr; ep_params.iface_addr = iface_addr; status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep); } else { status = UCS_ERR_UNSUPPORTED; } if (status != UCS_OK) { ucs_error("Failed to connect endpoint: %s", ucs_status_string(status)); goto err_destroy_eps; } } uct_perf_iface_flush_b(perf); free(buffer); uct_perf_barrier(perf); return UCS_OK; err_destroy_eps: for (i = 0; i < group_size; ++i) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { 
uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep != NULL) { uct_ep_destroy(perf->uct.peers[i].ep); } } free(perf->uct.peers); err_free: free(buffer); err: return status; } static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { unsigned group_size, group_index, i; uct_perf_barrier(perf); uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0); group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); for (i = 0; i < group_size; ++i) { if (i != group_index) { if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) { uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey); } if (perf->uct.peers[i].ep) { uct_ep_destroy(perf->uct.peers[i].ep); } } } free(perf->uct.peers); } static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params, ucp_params_t *ucp_params) { ucs_status_t status; size_t message_size; message_size = ucx_perf_get_message_size(params); switch (params->command) { case UCX_PERF_CMD_PUT: case UCX_PERF_CMD_GET: ucp_params->features |= UCP_FEATURE_RMA; break; case UCX_PERF_CMD_ADD: case UCX_PERF_CMD_FADD: case UCX_PERF_CMD_SWAP: case UCX_PERF_CMD_CSWAP: if (message_size == sizeof(uint32_t)) { ucp_params->features |= UCP_FEATURE_AMO32; } else if (message_size == sizeof(uint64_t)) { ucp_params->features |= UCP_FEATURE_AMO64; } else { if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Atomic size should be either 32 or 64 bit"); } return UCS_ERR_INVALID_PARAM; } break; case UCX_PERF_CMD_TAG: case UCX_PERF_CMD_TAG_SYNC: ucp_params->features |= UCP_FEATURE_TAG; break; case UCX_PERF_CMD_STREAM: ucp_params->features |= UCP_FEATURE_STREAM; break; default: if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Invalid test command"); } return UCS_ERR_INVALID_PARAM; } status = ucx_perf_test_check_params(params); if (status != UCS_OK) { return status; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t 
datatype, size_t iovcnt, unsigned thread_count, ucp_dt_iov_t **iov_p) { ucp_dt_iov_t *iov; if (UCP_PERF_DATATYPE_IOV == datatype) { iov = malloc(sizeof(*iov) * iovcnt * thread_count); if (NULL == iov) { ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt); return UCS_ERR_NO_MEMORY; } *iov_p = iov; } return UCS_OK; } static ucs_status_t ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length, void **address_p, ucp_mem_h *memh, int non_blk_flag) { ucp_mem_map_params_t mem_map_params; ucp_mem_attr_t mem_attr; ucs_status_t status; mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS | UCP_MEM_MAP_PARAM_FIELD_LENGTH | UCP_MEM_MAP_PARAM_FIELD_FLAGS; mem_map_params.address = *address_p; mem_map_params.length = length; mem_map_params.flags = UCP_MEM_MAP_ALLOCATE; if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) { mem_map_params.flags |= non_blk_flag; } status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh); if (status != UCS_OK) { goto err; } mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS; status = ucp_mem_query(*memh, &mem_attr); if (status != UCS_OK) { goto err; } *address_p = mem_attr.address; return UCS_OK; err: return status; } static void ucp_perf_test_free_host(const ucx_perf_context_t *perf, void *address, ucp_mem_h memh) { ucs_status_t status; status = ucp_mem_unmap(perf->ucp.context, memh); if (status != UCS_OK) { ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status)); } } static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; ucs_status_t status; size_t buffer_size; if (params->iov_stride) { buffer_size = params->msg_size_cnt * params->iov_stride; } else { buffer_size = ucx_perf_get_message_size(params); } /* Allocate send buffer memory */ perf->send_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->send_buffer, &perf->ucp.send_memh, UCP_MEM_MAP_NONBLOCK); if (status != UCS_OK) { goto err; } 
/* Allocate receive buffer memory */ perf->recv_buffer = NULL; status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count, &perf->recv_buffer, &perf->ucp.recv_memh, 0); if (status != UCS_OK) { goto err_free_send_buffer; } /* Allocate IOV datatype memory */ perf->ucp.send_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.send_iov); if (UCS_OK != status) { goto err_free_buffers; } perf->ucp.recv_iov = NULL; status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt, params->thread_count, &perf->ucp.recv_iov); if (UCS_OK != status) { goto err_free_send_iov_buffers; } return UCS_OK; err_free_send_iov_buffers: free(perf->ucp.send_iov); err_free_buffers: perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); err_free_send_buffer: perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); err: return UCS_ERR_NO_MEMORY; } static void ucp_perf_test_free_mem(ucx_perf_context_t *perf) { free(perf->ucp.recv_iov); free(perf->ucp.send_iov); perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh); perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh); } static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf) { unsigned i, thread_count = perf->params.thread_count; ucs_status_ptr_t *req; ucs_status_t status; for (i = 0; i < thread_count; ++i) { if (perf->ucp.tctx[i].perf.ucp.rkey != NULL) { ucp_rkey_destroy(perf->ucp.tctx[i].perf.ucp.rkey); } if (perf->ucp.tctx[i].perf.ucp.ep != NULL) { req = ucp_ep_close_nb(perf->ucp.tctx[i].perf.ucp.ep, UCP_EP_CLOSE_MODE_FLUSH); if (UCS_PTR_IS_PTR(req)) { do { ucp_worker_progress(perf->ucp.tctx[i].perf.ucp.worker); status = ucp_request_check_status(req); } while (status == UCS_INPROGRESS); ucp_request_release(req); } else if (UCS_PTR_STATUS(req) != UCS_OK) { ucs_warn("failed to close ep %p on thread %d: %s\n", perf->ucp.tctx[i].perf.ucp.ep, 
i, ucs_status_string(UCS_PTR_STATUS(req))); } } } } static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf, ucs_status_t status) { unsigned group_size = rte_call(perf, group_size); ucs_status_t collective_status = status; struct iovec vec; void *req = NULL; unsigned i; vec.iov_base = &status; vec.iov_len = sizeof(status); rte_call(perf, post_vec, &vec, 1, &req); rte_call(perf, exchange_vec, req); for (i = 0; i < group_size; ++i) { rte_call(perf, recv, i, &status, sizeof(status), req); if (status != UCS_OK) { collective_status = status; } } return collective_status; } static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf) { unsigned thread_count = perf->params.thread_count; void *rkey_buffer = NULL; void *req = NULL; unsigned group_size, group_index, i; ucx_perf_ep_info_t *remote_info; ucp_ep_params_t ep_params; ucp_address_t *address; ucs_status_t status; size_t buffer_size; void *buffer; group_size = rte_call(perf, group_size); group_index = rte_call(perf, group_index); if (group_size != 2) { ucs_error("perftest requires group size to be exactly 2 " "(actual group size: %u)", group_size); return UCS_ERR_UNSUPPORTED; } buffer_size = ADDR_BUF_SIZE * thread_count; buffer = malloc(buffer_size); if (buffer == NULL) { ucs_error("failed to allocate RTE receive buffer"); status = UCS_ERR_NO_MEMORY; goto err; } /* Initialize all endpoints and rkeys to NULL to handle error flow */ for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].perf.ucp.ep = NULL; perf->ucp.tctx[i].perf.ucp.rkey = NULL; } /* receive the data from the remote peer, extract the address from it * (along with additional wireup info) and create an endpoint to the peer */ rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req); remote_info = buffer; for (i = 0; i < thread_count; i++) { address = (ucp_address_t*)(remote_info + 1); rkey_buffer = UCS_PTR_BYTE_OFFSET(address, remote_info->ucp.worker_addr_len); perf->ucp.tctx[i].perf.ucp.remote_addr = 
remote_info->recv_buffer; ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS; ep_params.address = address; status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params, &perf->ucp.tctx[i].perf.ucp.ep); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } if (remote_info->rkey_size > 0) { status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer, &perf->ucp.tctx[i].perf.ucp.rkey); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status)); } goto err_free_eps_buffer; } } else { perf->ucp.tctx[i].perf.ucp.rkey = NULL; } remote_info = UCS_PTR_BYTE_OFFSET(remote_info, remote_info->ucp.total_wireup_len); } free(buffer); return UCS_OK; err_free_eps_buffer: ucp_perf_test_destroy_eps(perf); free(buffer); err: return status; } static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf, uint64_t features) { unsigned i, j, thread_count = perf->params.thread_count; size_t address_length = 0; void *rkey_buffer = NULL; void *req = NULL; ucx_perf_ep_info_t *info; ucp_address_t *address; ucs_status_t status; struct iovec *vec; size_t rkey_size; if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh, &rkey_buffer, &rkey_size); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status)); } goto err; } } else { rkey_size = 0; } /* each thread has an iovec with 3 entries to send to the remote peer: * ep_info, worker_address and rkey buffer */ vec = calloc(3 * thread_count, sizeof(struct iovec)); if (vec == NULL) { ucs_error("failed to allocate iovec"); status = UCS_ERR_NO_MEMORY; goto err_rkey_release; } /* get the worker address created for every thread and send it to 
the remote * peer */ for (i = 0; i < thread_count; i++) { status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker, &address, &address_length); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status)); } goto err_free_workers_vec; } vec[i * 3].iov_base = malloc(sizeof(*info)); if (vec[i * 3].iov_base == NULL) { ucs_error("failed to allocate vec entry for info"); status = UCS_ERR_NO_MEMORY; ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); goto err_free_workers_vec; } info = vec[i * 3].iov_base; info->ucp.worker_addr_len = address_length; info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size; info->rkey_size = rkey_size; info->recv_buffer = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer; vec[(i * 3) + 0].iov_len = sizeof(*info); vec[(i * 3) + 1].iov_base = address; vec[(i * 3) + 1].iov_len = address_length; vec[(i * 3) + 2].iov_base = rkey_buffer; vec[(i * 3) + 2].iov_len = info->rkey_size; address_length = 0; } /* send to the remote peer */ rte_call(perf, post_vec, vec, 3 * thread_count, &req); rte_call(perf, exchange_vec, req); if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } for (i = 0; i < thread_count; i++) { free(vec[i * 3].iov_base); ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker, vec[(i * 3) + 1].iov_base); } free(vec); return UCS_OK; err_free_workers_vec: for (j = 0; j < i; j++) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } free(vec); err_rkey_release: if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) { ucp_rkey_buffer_release(rkey_buffer); } err: return status; } static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf, uint64_t features) { ucs_status_t status; unsigned i; /* pack the local endpoints data and send to the remote peer */ status = ucp_perf_test_send_local_data(perf, features); if (status != 
UCS_OK) { goto err; } /* receive remote peer's endpoints' data and connect to them */ status = ucp_perf_test_receive_remote_data(perf); if (status != UCS_OK) { goto err; } /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, UCS_OK); if (status != UCS_OK) { goto err_destroy_eps; } /* force wireup completion */ for (i = 0; i < perf->params.thread_count; i++) { status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { ucs_warn("ucp_worker_flush() failed on theread %d: %s", i, ucs_status_string(status)); } } return status; err_destroy_eps: ucp_perf_test_destroy_eps(perf); err: (void)ucp_perf_test_exchange_status(perf, status); return status; } static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf) { ucp_perf_barrier(perf); ucp_perf_test_destroy_eps(perf); } static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf) { unsigned i; for (i = 0; i < perf->params.thread_count; i++) { if (perf->ucp.tctx[i].perf.ucp.worker != NULL) { ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker); } } } static void ucx_perf_set_warmup(ucx_perf_context_t* perf, const ucx_perf_params_t* params) { perf->max_iter = ucs_min(params->warmup_iter, ucs_div_round_up(params->max_iter, 10)); perf->report_interval = ULONG_MAX; } static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf) { uct_component_h *uct_components; uct_component_attr_t component_attr; uct_tl_resource_desc_t *tl_resources; unsigned md_index, num_components; unsigned tl_index, num_tl_resources; unsigned cmpt_index; ucs_status_t status; uct_md_h md; uct_md_config_t *md_config; status = uct_query_components(&uct_components, &num_components); if (status != UCS_OK) { goto out; } for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) { component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT; status = uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; 
} component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES; component_attr.md_resources = alloca(sizeof(*component_attr.md_resources) * component_attr.md_resource_count); status = uct_component_query(uct_components[cmpt_index], &component_attr); if (status != UCS_OK) { goto out_release_components_list; } for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) { status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL, &md_config); if (status != UCS_OK) { goto out_release_components_list; } ucs_strncpy_zero(perf->params.uct.md_name, component_attr.md_resources[md_index].md_name, UCT_MD_NAME_MAX); status = uct_md_open(uct_components[cmpt_index], component_attr.md_resources[md_index].md_name, md_config, &md); uct_config_release(md_config); if (status != UCS_OK) { goto out_release_components_list; } status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources); if (status != UCS_OK) { uct_md_close(md); goto out_release_components_list; } for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) { if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) && !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name)) { uct_release_tl_resource_list(tl_resources); perf->uct.cmpt = uct_components[cmpt_index]; perf->uct.md = md; status = UCS_OK; goto out_release_components_list; } } uct_md_close(md); uct_release_tl_resource_list(tl_resources); } } ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT, UCT_PERF_TEST_PARAMS_ARG(&perf->params)); status = UCS_ERR_NO_DEVICE; out_release_components_list: uct_release_component_list(uct_components); out: return status; } void uct_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))uct_worker_progress, (void*)perf->uct.worker); } void ucp_perf_barrier(ucx_perf_context_t *perf) { rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress, #if _OPENMP (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker); #else 
(void*)perf->ucp.tctx[0].perf.ucp.worker); #endif } static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf) { ucx_perf_params_t *params = &perf->params; uct_iface_config_t *iface_config; ucs_status_t status; uct_iface_params_t iface_params = { .field_mask = UCT_IFACE_PARAM_FIELD_OPEN_MODE | UCT_IFACE_PARAM_FIELD_STATS_ROOT | UCT_IFACE_PARAM_FIELD_RX_HEADROOM | UCT_IFACE_PARAM_FIELD_CPU_MASK, .open_mode = UCT_IFACE_OPEN_MODE_DEVICE, .mode.device.tl_name = params->uct.tl_name, .mode.device.dev_name = params->uct.dev_name, .stats_root = ucs_stats_get_root(), .rx_headroom = 0 }; UCS_CPU_ZERO(&iface_params.cpu_mask); status = ucs_async_context_init(&perf->uct.async, params->async_mode); if (status != UCS_OK) { goto out; } status = uct_worker_create(&perf->uct.async, params->thread_mode, &perf->uct.worker); if (status != UCS_OK) { goto out_cleanup_async; } status = uct_perf_create_md(perf); if (status != UCS_OK) { goto out_destroy_worker; } status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL, NULL, &iface_config); if (status != UCS_OK) { goto out_destroy_md; } status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params, iface_config, &perf->uct.iface); uct_config_release(iface_config); if (status != UCS_OK) { ucs_error("Failed to open iface: %s", ucs_status_string(status)); goto out_destroy_md; } status = uct_perf_test_check_capabilities(params, perf->uct.iface, perf->uct.md); /* sync status across all processes */ status = ucp_perf_test_exchange_status(perf, status); if (status != UCS_OK) { goto out_iface_close; } status = uct_perf_test_alloc_mem(perf); if (status != UCS_OK) { goto out_iface_close; } /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called * to give a chance to finish connection for some tranports (ib/ud, tcp). 
* They may return UCS_INPROGRESS from `uct_iface_flush` when connections are * in progress */ uct_iface_progress_enable(perf->uct.iface, UCT_PROGRESS_SEND | UCT_PROGRESS_RECV); status = uct_perf_test_setup_endpoints(perf); if (status != UCS_OK) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); goto out_free_mem; } return UCS_OK; out_free_mem: uct_perf_test_free_mem(perf); out_iface_close: uct_iface_close(perf->uct.iface); out_destroy_md: uct_md_close(perf->uct.md); out_destroy_worker: uct_worker_destroy(perf->uct.worker); out_cleanup_async: ucs_async_context_cleanup(&perf->uct.async); out: return status; } static void uct_perf_cleanup(ucx_perf_context_t *perf) { uct_perf_test_cleanup_endpoints(perf); uct_perf_test_free_mem(perf); uct_iface_close(perf->uct.iface); uct_md_close(perf->uct.md); uct_worker_destroy(perf->uct.worker); ucs_async_context_cleanup(&perf->uct.async); } static void ucp_perf_request_init(void *req) { ucp_perf_request_t *request = req; request->context = NULL; } static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf) { ucp_params_t ucp_params; ucp_worker_params_t worker_params; ucp_config_t *config; ucs_status_t status; unsigned i, thread_count; size_t message_size; ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES | UCP_PARAM_FIELD_REQUEST_SIZE | UCP_PARAM_FIELD_REQUEST_INIT; ucp_params.features = 0; ucp_params.request_size = sizeof(ucp_perf_request_t); ucp_params.request_init = ucp_perf_request_init; if (perf->params.thread_count > 1) { /* when there is more than one thread, a ucp_worker would be created for * each. 
all of them will share the same ucp_context */ ucp_params.features |= UCP_PARAM_FIELD_MT_WORKERS_SHARED; ucp_params.mt_workers_shared = 1; } status = ucp_perf_test_fill_params(&perf->params, &ucp_params); if (status != UCS_OK) { goto err; } status = ucp_config_read(NULL, NULL, &config); if (status != UCS_OK) { goto err; } status = ucp_init(&ucp_params, config, &perf->ucp.context); ucp_config_release(config); if (status != UCS_OK) { goto err; } thread_count = perf->params.thread_count; message_size = ucx_perf_get_message_size(&perf->params); status = ucp_perf_test_alloc_mem(perf); if (status != UCS_OK) { ucs_warn("ucp test failed to allocate memory"); goto err_cleanup; } perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t)); if (perf->ucp.tctx == NULL) { ucs_warn("ucp test failed to allocate memory for thread contexts"); goto err_free_mem; } worker_params.field_mask = UCP_WORKER_PARAM_FIELD_THREAD_MODE; worker_params.thread_mode = perf->params.thread_mode; for (i = 0; i < thread_count; i++) { perf->ucp.tctx[i].tid = i; perf->ucp.tctx[i].perf = *perf; /* Doctor the src and dst buffers to make them thread specific */ perf->ucp.tctx[i].perf.send_buffer = UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size); perf->ucp.tctx[i].perf.recv_buffer = UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size); status = ucp_worker_create(perf->ucp.context, &worker_params, &perf->ucp.tctx[i].perf.ucp.worker); if (status != UCS_OK) { goto err_free_tctx_destroy_workers; } } status = ucp_perf_test_setup_endpoints(perf, ucp_params.features); if (status != UCS_OK) { if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) { ucs_error("Failed to setup endpoints: %s", ucs_status_string(status)); } goto err_free_tctx_destroy_workers; } return UCS_OK; err_free_tctx_destroy_workers: ucp_perf_test_destroy_workers(perf); free(perf->ucp.tctx); err_free_mem: ucp_perf_test_free_mem(perf); err_cleanup: ucp_cleanup(perf->ucp.context); err: return status; } static void 
ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    /* Tear down the UCP side in reverse order of ucp_perf_setup(); the
     * barrier keeps peers in step before shared buffers are released. */
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
    ucp_cleanup(perf->ucp.context);
}

/* Per-API dispatch table, indexed by the test's API selector
 * (UCX_PERF_API_UCT / UCX_PERF_API_UCP). */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};

static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result);

/* Main benchmark entry point: validates params, allocates and initializes a
 * perf context, dispatches through ucx_perf_funcs[] for the selected API,
 * and (single-threaded path only) fills *result and reports it.
 * NOTE(review): on the multi-threaded path *result appears to be left
 * untouched by ucx_perf_thread_spawn() — confirm callers don't read it. */
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
                          ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    ucx_perf_global_init();

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) &&
        (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_init(perf, params);

    /* test_init selects an allocator matching the requested memory types;
     * NULL means the send/recv memory-type combination is unsupported */
    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory types %s<->%s",
                  ucs_memory_type_names[params->send_mem_type],
                  ucs_memory_type_names[params->recv_mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }

    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }

    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (params->thread_count == 1) {
        if (params->api == UCX_PERF_API_UCP) {
            /* single-thread shortcut: promote thread 0's UCP handles to the
             * top-level perf context */
            perf->ucp.worker      = perf->ucp.tctx[0].perf.ucp.worker;
            perf->ucp.ep          = perf->ucp.tctx[0].perf.ucp.ep;
            perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
            perf->ucp.rkey        = perf->ucp.tctx[0].perf.ucp.rkey;
        }

        if (params->warmup_iter > 0) {
            /* warmup run: same kernel, results discarded by prepare_new_run */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1, 0);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}

#if _OPENMP
/* Body executed by each OpenMP worker thread: optional warmup, a barrier so
 * all threads start the measured run together, then the run itself.  The
 * per-thread result is stored in tctx->result. */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx =
        (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf  = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t status;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        ucx_perf_test_prepare_new_run(perf, params);
    }

    /* Run test */
#pragma omp barrier
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }
    ucx_perf_calc_result(perf, result);

out:
    return status;
}

/* Combine the per-thread results into one report (see the aggregation
 * comment inside the function body). */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t* tctx =
        perf->ucp.tctx; /* all the thread contexts on perf */
    unsigned i, thread_count = perf->params.thread_count;
    double lat_sum_total_avegare = 0.0; /* NOTE(review): sic, "average" */
ucx_perf_result_t agg_result;

    /* iters/bytes/elapsed_time are copied from thread 0 — presumably
     * identical (or representative) across threads; TODO confirm */
    agg_result.iters        = tctx[0].result.iters;
    agg_result.bytes        = tctx[0].result.bytes;
    agg_result.elapsed_time = tctx[0].result.elapsed_time;
    agg_result.bandwidth.total_average = 0.0;
    agg_result.bandwidth.typical = 0.0; /* Undefined since used only for latency calculations */
    agg_result.latency.total_average = 0.0;
    agg_result.msgrate.total_average = 0.0;
    agg_result.msgrate.typical = 0.0; /* Undefined since used only for latency calculations */
    /* when running with multiple threads, the moment average value is
     * undefined since we don't capture the values of the last iteration */
    agg_result.msgrate.moment_average   = 0.0;
    agg_result.bandwidth.moment_average = 0.0;
    agg_result.latency.moment_average   = 0.0;
    agg_result.latency.typical          = 0.0;

    /* in case of multiple threads, we have to aggregate the results so that the
     * final output of the result would show the performance numbers that were
     * collected from all the threads.
     * BW and message rate values will be the sum of their values from all
     * the threads, while the latency value is the average latency from the
     * threads.
*/ for (i = 0; i < thread_count; i++) { agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average; agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average; lat_sum_total_avegare += tctx[i].result.latency.total_average; } agg_result.latency.total_average = lat_sum_total_avegare / thread_count; rte_call(perf, report, &agg_result, perf->params.report_arg, 1, 1); } static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */ int ti, thread_count = perf->params.thread_count; ucs_status_t* statuses; ucs_status_t status; omp_set_num_threads(thread_count); statuses = calloc(thread_count, sizeof(ucs_status_t)); if (statuses == NULL) { status = UCS_ERR_NO_MEMORY; goto out; } #pragma omp parallel private(ti) { ti = omp_get_thread_num(); tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]); } status = UCS_OK; for (ti = 0; ti < thread_count; ti++) { if (UCS_OK != tctx[ti].status) { ucs_error("Thread %d failed to run test: %s", tctx[ti].tid, ucs_status_string(tctx[ti].status)); status = tctx[ti].status; } } ucx_perf_thread_report_aggregated_results(perf); free(statuses); out: return status; } #else static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result) { ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)"); return UCS_ERR_INVALID_PARAM; } #endif /* _OPENMP */ void ucx_perf_global_init() { static ucx_perf_allocator_t host_allocator = { .mem_type = UCS_MEMORY_TYPE_HOST, .init = ucs_empty_function_return_success, .ucp_alloc = ucp_perf_test_alloc_host, .ucp_free = ucp_perf_test_free_host, .uct_alloc = uct_perf_test_alloc_host, .uct_free = uct_perf_test_free_host, .memcpy = ucx_perf_test_memcpy_host, .memset = memset }; UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest); ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator; /* FIXME 
Memtype allocator modules must be loaded to global scope, otherwise * alloc hooks, which are using dlsym() to get pointer to original function, * do not work. Need to use bistro for memtype hooks to fix it. */ UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL); }
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_element_selections </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_row_selections </li> // <li> \ref views_columns </li> // <li> \ref views_column_selections </li> // <li> \ref views_bands </li> // </ul> // </li> // <li> \ref arithmetic_operations // 
<ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref hpx_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref openmp_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref customization // <ul> // <li> \ref configuration_files </li> // <li> \ref vector_and_matrix_customization // <ul> // <li> \ref custom_data_members </li> // <li> \ref custom_operations </li> // <li> \ref custom_data_types </li> // </ul> // </li> // <li> \ref error_reporting_customization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref intra_statement_optimization </li> // <li> \ref faq </li> // <li> \ref issue_creation_guidelines </li> // <li> \ref blaze_references </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // \tableofcontents // // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly 
easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. // // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. 
The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // An alternate way to install \b Blaze for Windows users is Microsoft's // <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can // be installed via the command line: \code C:\src\vcpkg> .\vcpkg install blaze \endcode // The tool automatically downloads the latest \b Blaze release and copies the header files to // the common include directory. Please note that since \b Blaze is a header-only library the // attempt to install any static or dynamic library will fail! // // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. 
The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. 
// However, in order to achieve maximum performance a customization of at least the following // configuration files is required: // // - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache // settings. \b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // // \n \section blaze_version Blaze Version // <hr> // // The current major and minor version number of the \b Blaze library can be found in the // <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the // <b><tt><blaze/Blaze.h></tt></b> header file. 
The file contains the two following macros, // which can for instance be used for conditional compilation: \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 2 \endcode // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. 
Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. 
The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. 
The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector 
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. 
It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. 
Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>; std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( memory1.get(), 9UL, 16UL ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) ); AlignedUnpadded c( memory2.get(), 7UL ); // Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>; std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomVector<int,unaligned,unpadded>; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). 
For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomVector<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomVector<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vectors of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater than or // equal to the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! 
// // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. 
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. 
\code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as first argument, the array as second argument. In case of a static // array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... 
Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse vector classes can be directly initialized by means of an // initializer list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode // In case of sparse vectors, only the non-zero elements are used to initialize the vector. // // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the dense column vector v17 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the dense row vector v18 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the sparse column vector v19 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the sparse row vector v20 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v22( v10 ); // Compile time error: Size does not match! 
\endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // vector: \code blaze::DynamicVector<float> v1; blaze::CompressedVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode // In case of sparse vectors, only the non-zero elements are considered. // // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! 
\code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform // a compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // \n \subsection vector_operations_subscript_operator_1 Subscript Operator // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. 
The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the // preferred way to traverse the non-zero elements of a sparse vector is to use iterators. // // \n \subsection vector_operations_iterators Iterators // // All vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or // \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. 
} // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. // // \n \subsection vector_operations_subscript_operator_2 Subscript Operator // // The first option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element. 
// // \n \subsection vector_operations_set .set() // // An alternative to the subscript operator is the \c set() function: In case the element is not // yet contained in the vector the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // \n \subsection vector_operations_insert .insert() // // The insertion of elements can be better controlled via the \c insert() function. In contrast to // the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the matrix it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // \n \subsection vector_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_element_removal Element Removal // <hr> // // \subsection vector_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse vector. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedVector; CompressedVector<int> v( 42 ); // ... 
Initialization of the vector // Erasing the element at index 21 v.erase( 21 ); // Erasing a single element via iterator v.erase( v.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate v.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] with a value larger than 5 v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } ); \endcode // \n \section vector_operations_element_lookup Element Lookup // <hr> // // A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever // accessing a vector element at a specific index a lookup operation is required. Whereas the // subscript operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection vector_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in a sparse // vector. It specifically searches for the element at the given index. In case the element is // found, the function returns an iterator to the element. Otherwise an iterator just past the // last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that // the returned iterator is subject to invalidation due to inserting operations via the subscript // operator, the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the element at index 7. In case the element is not // contained in the vector, the end() iterator is returned. CompressedVector<int>::Iterator pos( a.find( 7 ) ); if( pos != a.end( 7 ) ) { // ... 
} \endcode // \n \subsection vector_operations_lowerbound .lowerBound() // // The \c lowerBound() function returns an iterator to the first element with an index not less // then the given index. In combination with the \c upperBound() function this function can be // used to create a pair of iterators specifying a range of indices. Note that the returned // iterator is subject to invalidation due to inserting operations via the subscript operator, // the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. CompressedVector<int>::Iterator pos1( A.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( A.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \subsection vector_operations_upperbound .upperBound() // // The \c upperBound() function returns an iterator to the first element with an index greater then // the given index. In combination with the \c lowerBound() function this function can be used to // create a pair of iterators specifying a range of indices. Note that the returned iterator is // subject to invalidation due to inserting operations via the subscript operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. 
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) );

// Searching the upper bound of index 28
CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) );
has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the vector is zero: \code blaze::DynamicVector<int> a; // Create an empty vector isEmpty( a ); // Returns true a.resize( 10 ); // Resize to 10 elements isEmpty( a ); // Returns false \endcode // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for non-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... 
Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, element selections, rows, and columns) is in default state if all its elements are // in default state. For instance, in case the vector is instantiated for a built-in integral or // floating point data type, the function returns \c true in case all vector elements are 0 and // \c false in case any vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors also the zero elements are also taken into account! 
// // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_trans trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_ctrans ctrans() // // It is also possible to compute the conjugate transpose of a vector. 
This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO). 
However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section vector_operations_modifying_operations Modifying Operations // <hr> // // \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. 
Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // junk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). 
Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n \section vector_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. 
\code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector or multiple vectors. If // passed a single vector, the functions return the smallest and largest element of the given // dense vector or the smallest and largest non-zero element of the given sparse vector, // respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; min( b ); // Returns 1 max( b ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref vector_operations_reduction_operations section. // // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. 
the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // \n \subsection vector_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense vector can be computed via \c softmax(). // The resulting dense vector consists of real values in the range (0..1], which add up to 1. \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; // Evaluating the softmax function y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // Results in 1 \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For // each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. 
For instance, the following use of the \c sign() // function \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); // In-place conjugate operation. 
c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenous for a pair of // dense vectors: \code blaze::StaticVector<double,3UL> a, b, c; c = hypot( a, b ); // Computes the componentwise hypotenuous \endcode // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector. // If passed a vector and a numeric exponent, the function computes the exponential value of each // element of the vector using the same exponent. If passed a second vector, the function computes // the componentwise exponential value: \code blaze::StaticVector<double,3UL> a, b, c; c = pow( a, 1.2 ); // Computes the exponential value of each element c = pow( a, b ); // Computes the componentwise exponential value \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense vectors: \code blaze::DynamicVector<double> a, b, c; c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse vector. 
For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors. The following example demonstrates the merging of two vectors of double // precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( (-2.1, 0.3) ) // ( (-4.2, -1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \section vector_operations_reduction_operations Reduction Operations // <hr> // // \subsection vector_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs a total reduction of the elements of the given dense vector // or the non-zero elements of the given sparse vector. The following examples demonstrate the // total reduction of a dense and sparse vector: \code blaze::DynamicVector<double> a; // ... 
Resizing and initialization const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection vector_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection vector_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode // \n \subsection vector_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense vector or the // smallest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmin = min( a ); // Results in -2 \endcode \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; const int totalmin = min( a ); // Results in 1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the minimum of the vector is 1. // // \n \subsection vector_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense vector or the // largest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). 
\code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmax = max( a ); // Results in 3 \endcode \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; const int totalmax = max( a ); // Results in -1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the maximum of the vector is -1. // // // \n \section vector_operations_norms Norms // <hr> // // \subsection vector_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = norm( a ); \endcode // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = sqrNorm( a ); \endcode // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l1 = l1Norm( a ); \endcode // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = l2Norm( a ); \endcode // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ...
Resizing and initialization const double l3 = l3Norm( a ); \endcode // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l4 = l4Norm( a ); \endcode // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double lp1 = lpNorm<2>( a ); // Compile time argument const double lp2 = lpNorm( a, 2.3 ); // Runtime argument \endcode // \n \subsection vector_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double max = maxNorm( a ); \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix).
All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. 
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extent of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. 
// - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>; std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomMatrix<int,unaligned,unpadded>; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned.
For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomMatrix<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible.
However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomMatrix<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding element and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown.
// // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/IdentityMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class IdentityMatrix; \endcode // - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. 
Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. 
In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of values provided by the array as first and second argument, the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse matrix classes can be directly initialized by means of an // initializer list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M15{ { 3 }, { 1 }, { 0, 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are used to initialize the matrix. // Missing values are considered to be default values. // // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of the dense row-major matrix M16 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of the dense column-major matrix M17 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M18( M7 ); // Instantiation of the compressed column-major matrix // M18 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M19( M8 ); // Instantiation of the compressed row-major matrix // M19 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. 
All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // matrix: \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are considered. Missing values are // considered to be default values. // // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns.
\code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // \n \subsection matrix_operations_function_call_operator_1 Function Call Operator // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. 
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore // the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators. // // \n \subsection matrix_operations_iterators Iterators // // All matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> M1( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... 
= it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. // // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element. // // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function. 
In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... 
\endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! // // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... 
Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 17, 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the according row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17). 
In case the element is not // contained in the matrix, the end() iterator of row 7 is returned. CompressedMatrix<int,rowMajor>::Iterator pos( A.find( 7, 17 ) ); if( pos != A.end( 7 ) ) { // ... } \endcode // \n \subsection matrix_operations_lowerbound .lowerBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index not less than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index not less than the given row // index. In combination with the \c upperBound() function this function can be used to create a // pair of iterators specifying a range of indices. Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of column index 17 in row 7. CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 7, 17 ) ); // Searching the upper bound of column index 28 in row 7 CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 7, 28 ) ); // Erasing all elements in the specified range A.erase( 7, pos1, pos2 ); \endcode // \n \subsection matrix_operations_upperbound .upperBound() // // In case of a row-major matrix, this function returns a row iterator to the first element with // an index greater than the given column index. In case of a column-major matrix, the function // returns a column iterator to the first element with an index greater than the given row // index. In combination with the \c lowerBound() function this function can be used to create a // pair of iterators specifying a range of indices. 
Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of row index 17 in column 9. CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); // Searching the upper bound of row index 28 in column 9 CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); \endcode // \n \section matrix_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection matrix_operations_rows .rows() / rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. 
the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() / columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \subsection matrix_operations_size size() // // The \c size() function returns the total number of elements of a matrix: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); size( M1 ); // Returns 48 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); size( M2 ); // Returns 56 \endcode // \subsection matrix_operations_spacing .spacing() / spacing() // // The total number of elements of a row or column of a dense matrix, including potential padding // elements, can be acquired via the \c spacing member function. In case of a row-major matrix // (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing // between two rows, in case of a column-major matrix (i.e. 
in case the storage flag is set to // blaze::columnMajor) the function returns the spacing between two columns: \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); // Returns the total number of elements in a row // Instantiating a column-major dynamic matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.spacing(); // Returns the total number of element in a column \endcode // Alternatively, the free functions \c spacing() can be used to query the current number of // elements in a row/column. \code spacing( M1 ); // Returns the total number of elements in a row spacing( M2 ); // Returns the total number of elements in a column \endcode // \n \subsection matrix_operations_capacity .capacity() / capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater or equal than the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! 
\endcode // \n \subsection matrix_operations_nonzeros .nonZeros() / nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. 
However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isempty isEmpty() // // The \c isEmpty() function returns whether the total number of elements of the matrix is zero: \code blaze::DynamicMatrix<int> A; // Create an empty matrix isEmpty( A ); // Returns true A.resize( 5, 0 ); // Resize to a 5x0 matrix isEmpty( A ); // Returns true A.resize( 5, 3 ); // Resize to a 5x3 matrix isEmpty( A ); // Returns false \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. 
All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account! // // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! 
// // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! 
// // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. // // // \n \subsection matrix_operations_matrix_trans trans() // // Matrices can be transposed via the \c trans() function. 
Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. 
Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section matrix_operations_modifying_operations Modifying Operations // <hr> // // \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. 
In contrast, the number of rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. Per // default, the existing elements are preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. \endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code blaze::DynamicMatrix<int,rowMajor> M1( 10UL, 20UL ); // Creating a 10x20 matrix auto row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. 
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. 
reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single matrix or multiple matrices. If // passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. 
// // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense matrix can be computed via \c softmax(). // The resulting dense matrix consists of real values in the range (0..1], which add up to 1. \code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; // Evaluating the softmax function B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double s = sum( B ); // Results in 1 \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. 
-15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For // each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of // the \c sign() function \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) 
(-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode 
// Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuse \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix. // If passed a matrix and a numeric exponent, the function computes the exponential value of each // element of the matrix using the same exponent. 
If passed a second matrix, the function computes // the componentwise exponential value: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Computes the exponential value of each element C = pow( A, B ); // Computes the componentwise exponential value \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the 
non-zero elements are taken into account! // // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense matrices: \code blaze::DynamicMatrix<double> A, B, C; C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on matrices. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse matrix. 
For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense matrices. The following example demonstrates the merging of two matrices of double // precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \section matrix_operations_reduction_operations Reduction Operations // <hr> // // \subsection matrix_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise // reduction of the elements of the given dense matrix or the non-zero elements of the given sparse // matrix. 
The following examples demonstrate the total reduction of a dense and sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a // column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the // (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In // case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise // and the result is a column vector: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing and initialization colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); \endcode \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... Resizing and initialization rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. 
// // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense matrix or the non-zero elements // of the given sparse matrix by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( B ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( B ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. 
In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. 
// // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmax = max( A ); // Results in 4 \endcode \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; const int totalmax = max( A ); // Results in -1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the maximum of this matrix is -1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the // largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the largest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each // row is determined and the result is a column vector. 
\code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,rowVector> colmax1, colmax2; colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // // \n \section matrix_operations_norms Norms // <hr> // // \subsection matrix_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = norm( A ); \endcode // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = sqrNorm( A ); \endcode // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l1 = l1Norm( A ); \endcode // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... 
Resizing and initialization const double l2 = l2Norm( A ); \endcode // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l3 = l3Norm( A ); \endcode // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l4 = l4Norm( A ); \endcode // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double lp1 = lpNorm<2>( A ); // Compile time argument const double lp2 = lpNorm( A, 2.3 ); // Runtime argument \endcode // \n \subsection matrix_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double max = maxNorm( A ); \endcode // \n \section matrix_operations_declaration_operations Declaration Operations // <hr> // // \subsection matrix_operations_declsym declsym() // // The \c declsym() operation can be used to explicitly declare any matrix or matrix expression // as symmetric: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = declsym( A ); \endcode // Any matrix or matrix expression that has been declared as symmetric via \c declsym() will // gain all the benefits of a symmetric matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isSymmetric( declsym( A ) ); // Will always return true without runtime effort S = declsym( A ); // Omit any runtime check for symmetry C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declsym() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... 
Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. 
Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of an upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being an upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... 
Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. 
// // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of an upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of an upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite 
matrices. Please note that it is the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. 
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. 
For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V ); } // namespace blaze \endcode // The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second // function additionally computes the eigenvectors. The eigenvalues are returned in the given vector // \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the // correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. 
// // In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V, // in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of // \a V. In case the given matrix is a compile time symmetric matrix with floating point elements, // the resulting eigenvectors will be of floating point type and therefore the elements of the // given eigenvector matrix are expected to be of floating point type. In all other cases they // are expected to be of complex type. // // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A // ... 
Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. // // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. 
// // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The first and third function compute only singular values of the given general \a m-by-\a n // matrix, the second and fourth function additionally compute singular vectors. The resulting // singular values are returned in the given vector \a s, the left singular vectors are returned // in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, // \a U, and \a V are resized to the correct dimensions (if possible and necessary). // // The third and fourth function allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. 
The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // Examples: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... 
Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V ); \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V, 0, 2 ); \endcode // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. 
Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... 
Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. 
// // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. 
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix. 
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime 
check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance. 
// // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. 
Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view. 
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 )
}, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomHermitian B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; using cplx = complex<float>; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes 
runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ...
Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. // // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix< complex<double> > B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // //
Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! 
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! 
// Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. 
Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >; // Recommendation 2: use sparse matrices for large diagonal matrices using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! 
// // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime 
check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. 
it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. 
// // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. 
Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible, if it is not, an exception is thrown. 
In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row, column, or band of a matrix. As such, views act as a reference to specific elements of // a vector or matrix. This reference is valid and can be used in every way as any other vector // or matrix can be used as long as the referenced vector or matrix is not resized or entirely // destroyed. Views also act as an alias to the elements of the vector or matrix: Changes made to the // elements (e.g. modifying values, inserting or erasing elements) via the view are immediately // visible in the vector or matrix and changes made via the vector or matrix are immediately // visible in the view. 
// // It is also possible to create nested views (compound views), such as for instance bands of // submatrices or row selections on column selections. A compound view also acts as reference // to specific elements of the underlying vector or matrix and is valid as long as the underlying, // referenced vector or matrix is not resized or entirely destroyed. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // - \ref views_element_selections // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_row_selections // - \ref views_columns // - \ref views_column_selections // - \ref views_bands // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); // Warning: It is the programmer's responsibility to ensure the view does not outlive // the viewed vector or matrix (dangling reference)! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. 
As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The first parameter specifies the offset of the subvector within the underlying dense or sparse // vector, the second parameter specifies the size of the subvector. The two parameters can be // specified either at compile time or at runtime: \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); // Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); \endcode // The \c subvector() function returns an expression representing the subvector view. The type of // this expression depends on the given subvector arguments, primarily the type of the vector and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. 
it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A subvector created // from a row vector can be used as any other row vector, a subvector created from a column vector // can be used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector of a // vector primitive on the left-hand side of an assignment or to grant read-access to a specific // subvector of a vector primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9]) auto sv = subvector( x, 0UL, 10UL ); // Setting the first ten elements of x to the 2nd row of matrix A sv = row( A, 2UL ); // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, 3UL, 10UL ); // Setting x to a subvector of the result of the addition between y and the 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode // \warning It is the programmer's responsibility to ensure the subvector does not outlive the // viewed vector: \code // Creating a subvector on a temporary vector; results in a dangling reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... 
Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 auto sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // subvectors an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the subvector it is inserted into the subvector, if it is already contained // in the subvector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. 
In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. This means that with // only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used. // For instance, the current number of elements can be obtained via the \c size() function, the // current capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since subvectors are references to a specific range of a // vector, several operations are not possible, such as resizing and swapping. The following // example shows this by means of a dense subvector view: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... 
Resizing and initialization // Creating a view on the range [5..15] of vector v auto sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 sv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = sv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicVector<double,blaze::rowVector> x; // ... 
Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] auto sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. 
The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned auto dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections***************************************************************************** /*!\page views_element_selections Element Selections // // \tableofcontents // // // Element selections provide views on arbitrary compositions of elements of dense and sparse // vectors. 
These views act as a reference to the selected elements and represent them as another // dense or sparse vector. This reference is valid and can be used in every way any other dense // or sparse vector can be used as long as the vector containing the elements is not resized or // entirely destroyed. The element selection also acts as an alias to the vector elements in the // specified range: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the vector and changes made via the vector are immediately // visible in the elements. // // // \n \section views_element_selections_setup Setup of Element Selections // // An element selection can be created very conveniently via the \c elements() function. It can // be included via the header file \code #include <blaze/math/Elements.h> \endcode // The indices of the elements to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = elements<4UL,6UL,8UL,10UL>( x ); // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), array.size() ); // Selecting the element 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the elements of the underlying vector in any order. 
Also note // that it is possible to use the same index multiple times. The \c elements() function returns an // expression representing the view on the selected elements. The type of this expression depends // on the given arguments, primarily the type of the vector and the compile time arguments. If the // type is required, it can be determined via the \c decltype specifier: \code using VectorType = blaze::DynamicVector<int>; using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. An element selection // created from a row vector can be used as any other row vector, an element selection created // from a column vector can be used as any other column vector. The view can also be used on both // sides of an assignment: It can either be used as an alias to grant write access to specific // elements of a vector primitive on the left-hand side of an assignment or to grant read-access // to specific elements of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } ); // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = row( A, 2UL ); // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y; // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between y and the 1st row of A x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode // Please note that using an element selection, which refers to an index multiple times, on the // left-hand side of an assignment leads to undefined behavior: \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times e = b; // Undefined behavior \endcode // In this example both vectors have the same size, which results in a correct vector assignment, // but the final value of the element at index 1 is unspecified. // // \warning It is the programmer's responsibility to ensure the element selection does not outlive // the viewed vector: \code // Creating an element selection on a temporary vector; results in a dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_element_selections_element_access Element Access // // The elements of an element selection can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... 
Resizing and initialization // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } ); // Setting the 1st element of the element selection, which corresponds to // the element at index 4 in vector v e[1] = 2.0; \endcode // The numbering of the selected elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of selected elements. Alternatively, the elements of an element selection // can be traversed via iterators. Just as with vectors, in case of non-const element selections, // \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of // constant element selections an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of dense vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { *it = ...; // OK: Write access to the dense vector value. ... = *it; // OK: Read access to the dense vector value. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense vector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of sparse vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_element_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse element selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 std::vector<size_t> indices; // ... Selecting indices of the sparse vector auto e = elements( v, indices ); // The subscript operator provides access to the selected elements of the sparse vector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse vector, the element is inserted. e[42] = 2.0; // The second operation for inserting elements via the element selection is the set() function. // In case the element is not contained in the vector it is inserted into the vector, if it is // already contained in the vector its value is modified. e.set( 45UL, -1.2 ); // An alternative for inserting elements into the vector is the insert() function. However, it // inserts the element only in case the element is not already contained in the vector. e.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. 
In case // of element selections, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the selection and that the selection's // capacity is large enough to hold the new element. Note however that due to the nature of an // element selection, which is an alias to arbitrary elements of a sparse vector, the append() // function does not work as efficiently for an element selection as it does for a vector. e.reserve( 10UL ); e.append( 51UL, -2.1 ); \endcode // \n \section views_element_selections_common_operations Common Operations // // An element selection can be used like any other dense or sparse vector. For instance, the // number of selected elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since element selections are references to a specific range of a vector, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of an element selection on a dense vector: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); e.size(); // Returns the number of elements in the element selection e.capacity(); // Returns the capacity of the element selection e.nonZeros(); // Returns the number of non-zero elements contained in the element selection e.resize( 84UL ); // Compilation error: Cannot resize an element selection auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_element_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse element selections can be used in all arithmetic operations that any other // dense or sparse vector can be used in. 
The following example gives an impression of the use of // dense element selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse // element selections with fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto e( elements( d1, indices1 ) ); // Selecting every third element of d1 in the range [0..21] e = d2; // Dense vector assignment to the selected elements elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements d3 = e + d2; // Dense vector/dense vector addition s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements elements( d1, indices1 ) += d2; // Addition assignment elements( d1, indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= e; // Multiplication assignment double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_submatrices */ 
//************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix() // function. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The first and second parameter specify the row and column of the first element of the submatrix. // The third and fourth parameter specify the number of rows and columns, respectively. The four // parameters can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); \endcode // The \c submatrix() function returns an expression representing the submatrix view. 
The type of // this expression depends on the given submatrix arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major // matrix will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); // Setting the submatrix of A to a 8x4 submatrix of B sm = submatrix( B, 0UL, 0UL, 8UL, 4UL ); // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C; // Assigning part of the result of a matrix addition to the first submatrix sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the // viewed matrix: \code // Creating a submatrix on a temporary matrix; results in a dangling reference! 
auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant submatrices an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... 
Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. 
In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of submatrices, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // or column of the submatrix and that the according row's or column's capacity is large // enough to hold the new element. Note however that due to the nature of a submatrix, which // may be an alias to the middle of a sparse matrix, the append() function does not work as // efficiently for a submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // A submatrix view can be used like any other dense or sparse matrix. This means that with only // a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // submatrices are views on a specific submatrix of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. 
The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \a std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! 
\endcode // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_setup Setup of Rows // <hr> // // \image html row.png // \image latex row.eps "Row view" width=250pt // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows // of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st row of matrix A (compile time index) auto row1 = row<1UL>( A ); // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 = row( A, 2UL ); \endcode // The \c row() function returns an expression representing the row view. The type of this // expression depends on the given row arguments, primarily the type of the matrix and the compile // time arguments. 
If the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the row does not outlive the viewed // matrix: \code // Creating a row on a temporary matrix; results in a dangling reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of a row can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); // Setting the 1st element of the dense row, which corresponds // to the 1st element in the 4th row of matrix A row4[1] = 2.0; \endcode // The numbering of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of a // row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // rows an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK: Write access to the dense row value. ... = *it; // OK: Read access to the dense row value. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. 
row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. This means that with only a few exceptions // all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the // current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since rows are references to specific rows of a matrix, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense row view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A auto row1 = row( A, 1UL ); for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row view on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections********************************************************************************* /*!\page views_row_selections Row Selections // // \tableofcontents // // // Row selections provide views on arbitrary compositions of rows of dense and sparse matrices. // These views act as a reference to the selected rows and represent them as another dense or // sparse matrix. 
This reference is valid and can be used in every way any other dense or sparse // matrix can be used as long as the matrix containing the rows is not resized or entirely // destroyed. The row selection also acts as an alias to the matrix elements in the specified // range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are // immediately visible in the matrix and changes made via the matrix are immediately visible // in the rows. // // // \n \section views_row_selections_setup Setup of Row Selections // // A row selection can be created very conveniently via the \c rows() function. It can be included // via the header file \code #include <blaze/math/Rows.h> \endcode // The indices of the rows to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = rows<4UL,6UL,8UL,10UL>( A ); // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), array.size() ); // Selecting the row 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the rows of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times. The \c rows() function returns an // expression representing the view on the selected rows. 
The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // row selection will always be treated as a row-major matrix, regardless of the storage order of // the matrix containing the rows. The view can also be used on both sides of an assignment: It // can either be used as an alias to grant write access to specific rows of a matrix primitive // on the left-hand side of an assignment or to grant read-access to specific rows of a matrix // primitive or expression on the right-hand side of an assignment. The following example // demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; blaze::DynamicMatrix<double,blaze::columnMajor> B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the row selection does not outlive the // viewed matrix: \code // Creating a row selection on a temporary matrix; results in a dangling reference! 
auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_row_selections_element_access Element Access // // The elements of a row selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the first four rows of A in reverse order auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the row selection, which corresponds // to the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode // Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as // with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant row selection an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... 
Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_row_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse row selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse row // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse row selection, the element // is inserted into the row selection. rs(2,4) = 2.0; // The second operation for inserting elements is the set() function. 
In case the element is // not contained in the row selection it is inserted into the row selection, if it is already // contained in the row selection its value is modified. rs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the row selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // row selection. rs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of row selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // of the row selection and that the according row's capacity is large enough to hold the new // element. Note however that due to the nature of a row selection, which may be an alias to // an arbitrary collection of rows, the append() function does not work as efficiently for // a row selection as it does for a matrix. rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_row_selections_common_operations Common Operations // // A view on specific rows of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // row selections are views on specific rows of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } ); rs.rows(); // Returns the number of rows of the row selection rs.columns(); // Returns the number of columns of the row selection rs.capacity(); // Returns the capacity of the row selection rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection auto rs2 = rows( A, { 9UL, 17UL, 25UL, 33UL } ); swap( rs, rs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_row_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse row selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use // of dense row selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto rs = rows( D1, indices1 ); // Selecting every third row of D1 in the range [0..21] rs = D2; // Dense matrix assignment to the selected rows rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows D3 = rs + D2; // Dense matrix/dense matrix addition S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur product assignment a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices // // Especially noteworthy is that row selections can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st and 3rd row of a column-major matrix A auto rs = rows( A, { 1UL, 3UL } ); // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a row selection on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row selection on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix elements. // Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // the 15th, 30th, and 45th row of the column-major matrix A with B. blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B; \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. 
As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_colums_setup Setup of Columns // <hr> // // \image html column.png // \image latex column.eps "Column view" width=250pt // // A reference to a dense or sparse column can be created very conveniently via the \c column() // function. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of // columns of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a reference to the 1st column of matrix A (compile time index) auto col1 = column<1UL>( A ); // Creating a reference to the 2nd column of matrix A (runtime index) auto col2 = column( A, 2UL ); \endcode // The \c column() function returns an expression representing the column view. The type of this // expression depends on the given column arguments, primarily the type of the matrix and the // compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other column vector, i.e. it can be assigned to, it // can be copied from, and it can be used in arithmetic operations. 
The reference can also be used // on both sides of an assignment: The column can either be used as an alias to grant write access // to a specific column of a matrix primitive on the left-hand side of an assignment or to grant // read-access to a specific column of a matrix primitive or expression on the right-hand side // of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::columnVector> x; blaze::CompressedVector<double,blaze::columnVector> y; blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the column does not outlive the // viewed matrix: \code // Creating a column on a temporary matrix; results in a dangling reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of a column can be directly accessed with the subscript operator. \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL ); // Setting the 1st element of the dense column, which corresponds // to the 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode // The numbering of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. 
Alternatively, the elements of a column // can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // columns an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. 
For instance, // the current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since columns are references to specific columns of a matrix, several // operations are not possible on views, such as resizing and swapping. The following example // shows this by means of a dense column view: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A auto col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a row-major matrix A auto col1 = column( A, 1UL ); for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column view on a matrix // with column-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // A with the 15th column of the row-major matrix B. blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation. // // \n Previous: \ref views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections****************************************************************************** /*!\page views_column_selections Column Selections // // \tableofcontents // // // Column selections provide views on arbitrary compositions of columns of dense and sparse // matrices. 
These views act as a reference to the selected columns and represent them as another // dense or sparse matrix. This reference is valid and can be used in every way any other dense // or sparse matrix can be used as long as the matrix containing the columns is not resized or // entirely destroyed. The column selection also acts as an alias to the matrix elements in the // specified range: Changes made to the columns (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the columns. // // // \n \section views_column_selections_setup Setup of Column Selections // // A column selection can be created very conveniently via the \c columns() function. It can be // included via the header file \code #include <blaze/math/Columns.h> \endcode // The indices of the columns to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = columns<4UL,6UL,8UL,10UL>( A ); // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), array.size() ); // Selecting the column 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the columns of the underlying matrix in any order. 
Also note // that it is possible to use the same index multiple times. The \c columns() function returns an // expression representing the view on the selected columns. The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // column selection will always be treated as a column-major matrix, regardless of the storage // order of the matrix containing the columns. The view can also be used on both sides of an // assignment: It can either be used as an alias to grant write access to specific columns of a // matrix primitive on the left-hand side of an assignment or to grant read-access to specific // columns of a matrix primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; blaze::DynamicMatrix<double,blaze::rowMajor> B; blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... 
Resizing and initialization // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the column selection does not outlive // the viewed matrix: \code // Creating a column selection on a temporary matrix; results in a dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_column_selections_element_access Element Access // // The elements of a column selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the first four columns of A in reverse order auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the column selection, which corresponds // to the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode // Alternatively, the elements of a column selection can be traversed via (const) iterators. // Just as with matrices, in case of non-const column selection, \c begin() and \c end() return // an iterator, which allows to manipulate the elements, in case of constant column selection an // iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. 
} \endcode // \n \section views_column_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse column selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256 auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse column // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse column selection, the element // is inserted into the column selection. cs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the column selection it is inserted into the column selection, if it is // already contained in the column selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the column selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // column selection. cs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of column selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according column // of the column selection and that the according column's capacity is large enough to hold the // new element. Note however that due to the nature of a column selection, which may be an alias // to an arbitrary collection of columns, the append() function does not work as efficiently // for a column selection as it does for a matrix. 
cs.reserve( 2UL, 10UL ); cs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_column_selections_common_operations Common Operations // // A view on specific columns of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // column selections are views on specific columns of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } ); cs.rows(); // Returns the number of rows of the column selection cs.columns(); // Returns the number of columns of the column selection cs.capacity(); // Returns the capacity of the column selection cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } ); swap( cs, cs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_column_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse column selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use of // dense column selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) 
can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto cs = columns( D1, indices1 ); // Selecting every third column of D1 in the range [0..21] cs = D2; // Dense matrix assignment to the selected columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns D3 = cs + D2; // Dense matrix/dense matrix addition S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // Schur product assignment a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix // // Especially noteworthy is that column selections can be created for both row-major and // column-major matrices. 
Whereas the interface of a row-major matrix only allows to traverse a // row directly and the interface of a column-major matrix only allows to traverse a column, via // views it is possible to traverse a row of a column-major matrix or a column of a row-major // matrix. For instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st and 3rd column of a row-major matrix A auto cs = columns( A, { 1UL, 3UL } ); // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a column selection on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column selection on a // matrix with column-major storage format. This is due to the non-contiguous storage of the // matrix elements. Therefore care has to be taken in the choice of the most suitable storage // order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // A with the 15th, 30th, and 45th column of the row-major matrix B. blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } ); \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation. 
// // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands */ //************************************************************************************************* //**Bands****************************************************************************************** /*!\page views_bands Bands // // \tableofcontents // // // Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the // subdiagonal, ...). As such, bands act as a reference to a specific band. This reference // is valid and can be used in every way any other vector can be used as long as the matrix // containing the band is not resized or entirely destroyed. The band also acts as an alias to // the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the band. // // // \n \section views_bands_setup Setup of Bands // <hr> // // \image html band.png // \image latex band.eps "Band view" width=250pt // // A reference to a dense or sparse band can be created very conveniently via the \c band() // function. It can be included via the header file \code #include <blaze/math/Band.h> \endcode // The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the // total number of rows and \c N is the total number of columns, and can be specified both at // compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st lower band of matrix A (compile time index) auto band1 = band<-1L>( A ); // Creating a reference to the 2nd upper band of matrix A (runtime index) auto band2 = band( A, 2L ); \endcode // In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view // on the diagonal of a dense or sparse matrix. 
It has the same effect as calling the \c band() // function with a compile time index of 0: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the diagonal of matrix A via the band() and diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A ); static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" ); \endcode // Both the \c band() and the \c diagonal() function return an expression representing the band // view. The type of this expression depends on the given arguments, primarily the type of the // matrix and the compile time arguments. If the type is required, it can be determined via // \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); \endcode // This resulting view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. By default, bands are considered // column vectors, but this setting can be changed via the \c defaultTransposeFlag switch. The // reference can also be used on both sides of an assignment: The band can either be used as an // alias to grant write access to a specific band of a matrix primitive on the left-hand side of // an assignment or to grant read-access to a specific band of a matrix primitive or expression // on the right-hand side of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... 
Resizing and initialization // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); band2 = x; // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; // Setting x to the 2nd lower band of the result of the matrix multiplication x = band( A * B, -2L ); // Setting y to the 2nd upper band of the result of the sparse matrix multiplication y = band( C * D, 2L ); \endcode // \warning It is the programmer's responsibility to ensure the band does not outlive the viewed // matrix: \code // Creating a band on a temporary matrix; results in a dangling reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_bands_element_access Element Access // <hr> // // The elements of a band can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L ); // Setting the 1st element of the dense band, which corresponds // to the 1st element in the 4th upper band of matrix A band4[1] = 2.0; \endcode // The numbering of the band elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of elements of the referenced band. Alternatively, the elements of a band // can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and // \c end() return an iterator, which allows to manipulate the elements, in case of constant bands // an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th upper band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { *it = ...; // OK: Write access to the dense band value ... 
= *it; // OK: Read access to the dense band value. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense band value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_bands_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse band can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto diag( band( A, 0L ) ); // Reference to the diagonal of A // The subscript operator provides access to all possible elements of the sparse band, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse band, the element is inserted into the band. diag[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the band it is inserted into the band, if it is already contained in // the band its value is modified. diag.set( 45UL, -1.2 ); // An alternative for inserting elements into the band is the insert() function. However, // it inserts the element only in case the element is not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode // \n \section views_bands_common_operations Common Operations // <hr> // // A band view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of band elements can be obtained via the \c size() function, the current // capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since bands are references to specific bands of a matrix, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of a dense band view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd upper band of matrix A auto band2 = band( A, 2L ); band2.size(); // Returns the number of elements in the band band2.capacity(); // Returns the capacity of the band band2.nonZeros(); // Returns the number of non-zero elements contained in the band band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_bands_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse bands can be used in all arithmetic operations that any other dense or // sparse vector can be used in. The following example gives an impression of the use of dense // bands within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse bands with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto diag ( band( A, 0L ) ); // Reference to the diagonal of A band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag = 1.0; // Homogeneous initialization of the diagonal of A band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A b = diag + a; // Dense vector/dense vector addition b = c + band( A, -1L ); // Sparse vector/dense vector addition b = diag * band( A, -2L ); // Component-wise vector multiplication band( A, -1L ) *= 2.0; // In-place scaling of the 1st lower band b = band( A, -1L ) * 2.0; // Scaling of the 1st lower band b = 2.0 * band( A, -1L ); // Scaling of the 
1st upper band band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // Multiplication assignment double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors A = band( A, -1L ) * trans( c ); // Outer product between two vectors \endcode // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... 
Initializing the vectors v3 = v1 + v2; // Addition of two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitive as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. 
The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used for as scalar value for scalar multiplications (see the // following example). 
However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. 
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n \section schur_product Componentwise Multiplication / Schur Product // <hr> // // Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns) // via the modulo operator results in a componentwise multiplication (Schur product) of the two // matrices: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, 35UL ); // ... 
Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element type, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on a multiplication expression can speed up the computation 
Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // For all possible operations \b Blaze tries to achieve maximum performance on a single CPU // core. However, today's CPUs are not single core anymore, but provide several (homogeneous // or heterogeneous) compute cores. In order to fully exploit the performance potential of a // multicore CPU, computations have to be parallelized across all available cores of a CPU. // For this purpose, \b Blaze provides four different shared memory parallelization techniques: // // - \ref hpx_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // - \ref openmp_parallelization // // When any of the shared memory parallelization techniques is activated, all arithmetic // operations on dense vectors and matrices (including additions, subtractions, multiplications, // divisions, and all componentwise arithmetic operations) and most operations on sparse vectors // and matrices are automatically run in parallel. 
However, in addition, \b Blaze provides means // to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization**************************************************************************** /*!\page hpx_parallelization HPX Parallelization // // \tableofcontents // // // The first shared memory parallelization provided with \b Blaze is based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the HPX-based parallelization, the following steps have to be taken: First, // the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_HPX_THREADS ... \endcode // Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked. // And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see // the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a> // for further details). These three actions will cause the \b Blaze library to automatically try // to run all operations in parallel with the specified number of HPX threads. // // Note that the HPX-based parallelization has priority over the OpenMP-based, C++11 thread-based, // and Boost thread-based parallelizations, i.e. is preferred in case multiple parallelizations // are enabled in combination with the HPX thread parallelization. // // The number of threads used by the HPX backend has to be specified via the command line: \code ... --hpx:threads 4 ... \endcode // Please note that the \b Blaze library does not limit the available number of threads. 
Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of HPX threads, the function will return the actual number of threads used by // the HPX subsystem. // // // \n \section hpx_configuration HPX Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given // operation is large enough and exceeds a certain threshold the operation is executed in parallel. // All thresholds related to the HPX-based parallelization are contained within the configuration // file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the HPX-based parallelization. 
// // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the HPX-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both HPX and C++11 // threads are enabled on the command line, the HPX-based parallelization has priority and is // preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the C++11 thread parallelization. 
// // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based // on <a href="https://www.boost.org/doc/libs/1_68_0/doc/html/thread.html">Boost threads</a>. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. 
These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the HPX-based and C++11 thread-based parallelizations have // priority, i.e. are preferred in case either is enabled in combination with the Boost thread // parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. 
They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // The fourth and final shared memory parallelization provided with \b Blaze is based on // <a href="https://www.openmp.org">OpenMP</a>. // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. Note however that the HPX-based, the C++11 // thread-based, and the Boost thread-based parallelizations have priority, i.e. are preferred in // case either is enabled in combination with the OpenMP thread parallelization. 
// // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. 
All shared memory thresholds are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitiv to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... 
\endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). // // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via the an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. 
Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing construct to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. 
This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. 
// // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. 
// // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into a the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! 
archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a differrent kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that is is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2 } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into a the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the examples demonstrates, the vector serialization offers an enormous flexibility. 
However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_exception is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into a the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. 
Note that it is possible to reconstitute // the matrix into a differrent kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that is is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! archive >> D2 } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into a the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_exception is // thrown. 
// // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n */ //************************************************************************************************* //**Customization********************************************************************************** /*!\page customization Customization // // Although \b Blaze tries to work out of the box for every possible setting, still it may be // necessary to adapt the library to specific requirements. The following three pages explain // how to customize the \b Blaze library to your own needs: // // - \ref configuration_files // - \ref vector_and_matrix_customization // - \ref error_reporting_customization // // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation. 
// // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library. // The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. 
// // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Alternatively, both settings can be specified via command line or by defining the symbols // manually before including any \b Blaze header file: \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any \b Blaze header file: \code #define BLAZE_CACHE_SIZE 3145728UL #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available. 
However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. 
// // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. 
// // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is // disabled. It is recommended to consult the target architecture's white papers to decide whether // streaming is beneficial or hurtful for performance. // // // \n Previous: \ref customization &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices********************************************************** /*!\page vector_and_matrix_customization Customization of Vectors and Matrices // // \tableofcontents // // // \n \section custom_data_members Custom Data Members // <hr> // // So far the \b Blaze library does not provide a lot of flexibility to customize the data // members of existing \ref vector_types and \ref matrix_types. However, to some extend it is // possible to customize vectors and matrices by inheritance. The following example gives an // impression on how to create a simple variation of \ref matrix_types_custom_matrix, which // automatically takes care of acquiring and releasing custom memory. 
\code template< typename Type // Data type of the matrix , bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit inline MyCustomMatrix( size_t m, size_t n ) : CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { this->reset( array_.get(), m, n ); } private: std::unique_ptr<Type[]> array_; }; \endcode // Please note that this is a simplified example with the intent to show the general approach. // The number of constructors, the memory acquisition, and the kind of memory management can of // course be adapted to specific requirements. Also, please note that since none of the \b Blaze // vectors and matrices have virtual destructors polymorphic destruction cannot be used. // // // \n \section custom_operations Custom Operations // <hr> // // There are two approaches to extend \b Blaze with custom operations. First, the \c map() // functions provide the possibility to execute componentwise custom operations on vectors and // matrices. Second, it is possible to add customized free functions. // // \n \subsection custom_operations_map The map() Functions // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors and matrices. The unary \c map() function can be used to apply a custom // operation on each single element of a dense vector or matrix or each non-zero element of a // sparse vector or matrix. For instance, the following example demonstrates a custom square // root computation on a dense matrix: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors or two dense matrices. 
The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // These examples demonstrate the most convenient way of defining custom operations by // passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom // functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. 
Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision 
floating point data</li> // <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. 
// // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n \subsection custom_operations_free_functions Free Functions // // In order to extend \b Blaze with new functionality it is possible to add free functions. Free // functions can be used either as wrappers around calls to the map() function or to implement // general, non-componentwise operations. 
The following two examples will demonstrate both ideas. // // The first example shows the \c setToZero() function, which resets a sparse matrix to zero // without affecting the sparsity pattern. It is implemented as a convenience wrapper around // the map() function: \code template< typename MT // Type of the sparse matrix , bool SO > // Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = blaze::map( ~mat, []( int ){ return 0; } ); } \endcode // The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and // provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the // <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a> // it also enables a conversion back to the actual type. This downcast is performed via the tilde // operator (i.e. \c operator~()). The template parameter \c SO represents the storage order // (blaze::rowMajor or blaze::columnMajor) of the matrix. // // The second example shows the \c countZeros() function, which counts the number of values, which // are exactly zero, in a dense, row-major matrix: \code template< typename MT > size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); const size_t N( (~mat).columns() ); size_t count( 0UL ); for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } return count; } \endcode // The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again, // it is possible to perform the conversion to the actual type via the tilde operator. 
// // The following two listings show the declarations of all vector and matrix base classes, which // can be used for custom free functions: \code template< typename VT // Concrete type of the dense or sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class Vector; template< typename VT // Concrete type of the dense vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class DenseVector; template< typename VT // Concrete type of the sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class SparseVector; \endcode \code template< typename MT // Concrete type of the dense or sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class Matrix; template< typename MT // Concrete type of the dense matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; template< typename MT // Concrete type of the sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class SparseMatrix; \endcode // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. 
If any of these functions is missing it is necessary to implement // the operator to perform the according operation. For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t& b ); double_t sub ( const double_t& a, const double_t& b ); double_t mult( const double_t& a, const double_t& b ); double_t div ( const double_t& a, const double_t& b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. 
In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. 
For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! 
// // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! 
// // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. 
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const 
float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const 
DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, 
double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, 
CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK 
functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[N] ); // No initialization required int info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... 
Initializing the matrix const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. 
The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. // // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. 
// // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown. 
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const 
DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). 
Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from a RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // a RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, 
int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. 
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from an QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int 
ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // a LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void 
ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... 
the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert a Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. 
// // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace 
blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and // \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the BLAS functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the // solution of the linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! 
// // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w. 
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info ); void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info ); void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... 
the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. 
The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info ); void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int* liwork, int* info ); void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int* liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... 
the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info ); void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point 
range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. 
The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). // // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, 
DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. 
// // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. 
For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. 
// // // \n \section block_vectors_and_matrices_examples Examples // <hr> // // The first example demonstrates the multiplication between a statically sized block matrix // and a block vector: \code using namespace blaze; // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = StaticVector<int,2UL,columnVector>; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // The second example shows the multiplication between a compressed block matrix with blocks of // varying size and a compressed block vector: \code using namespace blaze; // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) ) // ( ) ( ) ( ) // ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = HybridVector<int,3UL,columnVector>; CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = V3{ 2 }; x[2] = V3{ -1, 2 }; CompressedVector<V3,columnVector> y( A * x ); \endcode // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page 
intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. 
Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug? // // The size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can // indeed be larger than expected: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36 \endcode // In order to achieve the maximum possible performance the \b Blaze library tries to enable // SIMD vectorization even for small vectors. 
For that reason \b Blaze by default uses padding // elements for all dense vectors and matrices to guarantee that at least a single SIMD vector // can be loaded. Depending on the used SIMD technology that can significantly increase the size // of a \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) \endcode // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // <hr> // \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug? 
// // Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref faq_padding), // the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can still // be larger than expected: \code #define BLAZE_USE_PADDING 0 #include <blaze/Blaze.h> StaticVector<int,3> a; StaticVector<int,5> b; sizeof( a ); // Always evaluates to 12 sizeof( b ); // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected) \endcode // The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128 // bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit). // Since the second vector contains enough elements it is possible to benefit from vectorization. // However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of // 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512 // is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16 // integers, respectively. Even the second vector does not hold enough elements to benefit from // vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte // alignment (for AVX-512). // // It is possible to disable the vectorization entirely by the compile time switch in the // <tt>./blaze/config/Vectorization.h</tt> configuration file: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics and the necessary alignment to speed up computations. 
In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! // // // <hr> // \section faq_blas To what extent does Blaze make use of BLAS functions under the hood? // // Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions // for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and // \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze // kernels. // // The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether // \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze // does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If // \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels // or its own custom kernels. In case of the dense matrix multiplication this decision is based // on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for // small matrices it uses its own custom kernels. The threshold for this decision can be // configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD, // \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches // (see <tt>./blaze/config/Thresholds.h</tt>). // // Please note that the extent to which \b Blaze uses BLAS kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_lapack To what extent does Blaze make use of LAPACK functions under the hood? // // \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the // determinants and eigenvalues, and the SVD. 
In contrast to the BLAS functionality (see // \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to // use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will // get link time errors. // // Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it? // // The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze // library, which by now is several hundred thousand lines of source code. That means that a lot // of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it // is rare that everything is required within a single compilation unit. Therefore it is easily // possible to reduce compile times by including only those \b Blaze features that are used within // the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be // enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation // times by about 20%. // // Additionally we are taking care to implement new \b Blaze functionality such that compile times // do not explode and try to reduce the compile times of existing features. Thus newer releases of // \b Blaze can also improve compile times. 
// // \n Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref issue_creation_guidelines \n */ //************************************************************************************************* //**Issue Creation Guidelines********************************************************************** /*!\page issue_creation_guidelines Issue Creation Guidelines // // \tableofcontents // // // One of the most important aspects of the \b Blaze project is the // <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official // \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests // and bug reports, as we believe that this is a significant part of making \b Blaze a better // library. However, we are asking to follow a small set of guidelines when creating an issue // to facilitate the issue management on our side and also to make issues more useful for users // of \b Blaze. // // // <hr> // \section issues_title Title // // The title is the most important detail of an issue. A well chosen title makes it easy to grasp // the idea of an issue and improves the discoverability. Therefore, please choose a title that // is ... // // - ... as descriptive as possible; // - ... as concise as possible; // - ... as unambiguous as possible. // // Also, please create a separate issue for each idea/problem/etc. A very general title or an // \"and\" in the title could be an indication that the issue is not specific enough and should // be split into several issues. 
// // \subsection issues_title_good_examples Good Examples // // - \"Provide support for AVX-512 SIMD operations\" // - \"Add support for the Boost Multiprecision Library\" // - \"Introduce reduction operations into Blaze\" // - \"Compilation error on KNL with -march=knl\" // // \subsection issues_title_bad_examples Bad Examples // // - \"Several requests\" (instead create separate issues for each single request) // - \"Improve the performance\" (instead specify which operation should perform better) // - \"Blaze library compilation error\" (instead try to be more specific) // // // <hr> // \section issues_description Description // // The description should help us to understand your idea or problem in as much detail as possible. // Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how // the behavior should be, etc.). Please spend a couple of minutes to try to make the description // as comprehensive as possible. // // // <hr> // \section issues_assignee Assignee // // There is no need to assign the issue to a particular person. It is perfectly ok if you just // ignore this setting. // // // <hr> // \section issues_kind Kind of Issue // // There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug, // \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the // following we try to give guidelines on which kind to choose for a particular issue: // // \subsection issues_kind_bug Bug // // Please choose the category \ref issues_kind_bug if ... // // - ... you experience a compilation error despite your best efforts to get it right; // - ... you experience a crash/failure despite your best efforts to get it right; // - ... you experience problems when combining features; // - ... a feature does not work as specified/documented (i.e. can be considered broken). // // Please \b don't choose the category \ref issues_kind_bug if ... // // - ... 
you feel a feature should work differently than it currently does (instead create a // \ref issues_kind_proposal with a convincing title and description); // - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement // issue to extend the documentation); // - ... you are missing a feature (instead create a \ref issues_kind_proposal or // \ref issues_kind_enhancement issue). // // If you select the category \ref issues_kind_bug, please also try to provide a minimum example // that fails. That helps us to minimize the time to resolve the bug. // // As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will // also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of // the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze. // // \subsection issues_kind_enhancement Enhancement // // Please choose the category \ref issues_kind_enhancement if ... // // - ... you need an add-on to an existing feature; // - ... you need an extension of an existing feature; // - ... you need an extended documentation for an existing feature. // // \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind // if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa. // Just make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // // Please choose the category \ref issues_kind_proposal if ... // // - ... you want to request a new feature; // - ... you want to change an existing feature. // // \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if // a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. 
Just // make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // Please choose the category \ref issues_kind_task if ... // // - ... you want us to do something not feature related; // - ... you have something else in mind which does not fall in the other three categories. // // // <hr> // \section issues_priority Priority // // Via the priority of an issue you can tell us how important the issue is to you. Therefore the // priority can have an influence on when we will deal with the issue. However, unfortunately we // don't have an infinite amount of time and we can not deal with an arbitrary amount of issues // at the same time. We will therefore take the priority into account, but mainly schedule the // issues based on impact to all \b Blaze users and the estimated time to resolve it. // // You can choose between \ref issues_priority_blocker, \ref issues_priority_critical, // \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial. // // \subsection issues_priority_blocker Blocker // // Please choose a \ref issues_priority_blocker priority if ... // // - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users. // // Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal // should never be a \ref issues_priority_blocker! // // \subsection issues_priority_critical Critical // // Please choose a \ref issues_priority_critical priority if ... // // - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful; // - ... you cannot use \b Blaze without the proposed feature; // - ... you consider it to be essential for \b all \b Blaze users. // // \subsection issues_priority_major Major // // Please choose a \ref issues_priority_major priority if ... // // - ... 
a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but // still very important to you; // - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users. // // The \ref issues_priority_major category is the default setting in Bitbucket and we therefore // consider it as the default priority for issues. // // \subsection issues_priority_minor Minor // // Please choose a \ref issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug does not affect many \b Blaze users; // - ... a feature request would only be useful for a small number of \b Blaze users; // - ... a feature would be nice to have, but is not particularly important. // // \subsection issues_priority_trivial Trivial // // Please choose a \ref issues_priority_trivial priority if ... // // - ... a \ref issues_kind_bug hardly affects anyone; // - ... a feature request would only be useful for very few \b Blaze users; // - ... the expected time to resolve an issue is very small. // // // <hr> // \section issues_attachment Attachments // // You can always provide us with additional information in the form of attachments. Feel free // to attach something to the issue if ... // // - ... it can help us to analyze a \ref issues_kind_bug; // - ... you have some source code that demonstrates a problem; // - ... you already have a working prototype that sketches the idea; // - ... you have additional resources that could help us. // // We appreciate anything that simplifies our work and speeds up our progress. 
// // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n */ //************************************************************************************************* //**Blaze References******************************************************************************* /*!\page blaze_references Blaze References // // In case you need references to the \b Blaze library (for papers or other publications), please // feel free to use one of the following references: \code @misc{blazelib, author = "Klaus {Iglberger}", title = "Blaze C++ Linear Algebra Library", howpublished = "https://bitbucket.org/blaze-lib", year = 2012 } \endcode \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies", journal = "SIAM Journal on Scientific Computing", year = 2012, volume = 34(2), pages = C42--C69 } \endcode \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High Performance Smart Expression Template Math Libraries", booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year = 2012 } \endcode // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform 3x3 convolution weights into the Winograd F(6x6,3x3) domain and
// repack them for the pack4 (4-channel interleaved) layout consumed by
// conv3x3s1_winograd64_pack4_neon.
//
// kernel          : input weights, laid out as outch x inch x 9 floats
// kernel_tm_pack4 : output, 8x8 transformed tiles repacked as
//                   4b-4a-inch/4a-64-outch/4b (a = input pack, b = output pack)
// inch / outch    : input / output channel counts. The q+3<outch / p+3<inch
//                   loops silently skip any remainder, so both are presumably
//                   multiples of 4 here -- NOTE(review): confirm callers
//                   guarantee this.
static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // Kernel transform U = G * g * G^T expands each 3x3 kernel g into an
    // 8x8 tile. (Original comment said "winograd23"; ktm below is the
    // 8x3 G matrix matching the 8x8 / 6x6-output tile used in this file.)
    Mat kernel_tm;
    kernel_tm.create(8*8, inch, outch);

    // G matrix (8 rows x 3 columns) of the F(6x6,3x3) kernel transform.
    const float ktm[8][3] = {
        {   1.0f,     0.0f,     0.0f},
        {-2.0f/9,  -2.0f/9,  -2.0f/9},
        {-2.0f/9,   2.0f/9,  -2.0f/9},
        {1.0f/90,  1.0f/45,  2.0f/45},
        {1.0f/90, -1.0f/45,  2.0f/45},
        {1.0f/45,  1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        {   0.0f,     0.0f,     1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            // 3x3 kernel of output channel p, input channel q.
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;      // kernel row 0
            const float* k1 = kernel0 + 3;  // kernel row 1
            const float* k2 = kernel0 + 6;  // kernel row 2

            // h : tmp = G * g (8x3 intermediate)
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = (G * g) * G^T, stored as one 8x8 tile per (p,q)
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 4b-4a-inch/4a-64-outch/4b
    // For each of the 64 tile positions, group 4 input channels x 4 output
    // channels of weights contiguously (16 floats per group, elemsize 4u*16).
    kernel_tm_pack4.create(inch/4, 64, outch/4, (size_t)4u*16, 16);

    for (int q=0; q+3<outch; q+=4)
    {
        // Four consecutive output channels feeding one pack4 destination channel.
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q+1);
        const Mat k2 = kernel_tm.channel(q+2);
        const Mat k3 = kernel_tm.channel(q+3);

        Mat g0 = kernel_tm_pack4.channel(q/4);

        for (int k=0; k<64; k++)
        {
            float* g00 = g0.row(k);

            for (int p=0; p+3<inch; p+=4)
            {
                // k<AB> = output channel q+A, input channel p+B.
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p+1);
                const float* k02 = k0.row(p+2);
                const float* k03 = k0.row(p+3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p+1);
                const float* k12 = k1.row(p+2);
                const float* k13 = k1.row(p+3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p+1);
                const float* k22 = k2.row(p+2);
                const float* k23 = k2.row(p+3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p+1);
                const float* k32 = k3.row(p+2);
                const float* k33 = k3.row(p+3);

                // Output channel varies fastest inside each 4-float group,
                // input channel across the four groups.
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];

                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];

                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}

static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q<inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i=0; i<h_tm/8; i++) { for (int j=0; j<w_tm/8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m=0; m<8; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); 
vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm/8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m=0; m<8; 
m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = 
tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm/8 * w_tm/8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); #if __aarch64__ Mat bottom_blob_tm2(12 * inch, tiles/12 + (tiles%12)/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); #else Mat bottom_blob_tm2(8 * inch, tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i=0; #if __aarch64__ for (; i+11<tiles; i+=12) { float* tm2p = tm2.row(i/12); const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), 
"1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i+7<tiles; i+=8) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8); #else float* tm2p = tm2.row(i/8); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11" ); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i+3<tiles; i+=4) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%8)/4); #else float* tm2p = tm2.row(i/8 + (i%8)/4); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i+1<tiles; i+=2) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2); #else float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i<tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2 + i%2); #else float* tm2p = tm2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2); #endif const float* r0 = bottom_blob_tm; r0 += (r*tiles + i) * 4; for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0" ); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0" ); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack); int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p+1); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i=0; for (; i+11<tiles; i+=12) { const float* r0 = bb2.row(i/12); const float* k0 = kernel0_tm.row(r); const float* k1 = kernel1_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" 
"eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n"// w0123_0 "prfm pldl1keep, [%5, #128] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n"// w0123_1 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v6.4s, v0.s[0] \n" "fmla v21.4s, v6.4s, v0.s[1] \n" "fmla v22.4s, v6.4s, v0.s[2] \n" "fmla v23.4s, v6.4s, v0.s[3] \n" "fmla v24.4s, v6.4s, v1.s[0] \n" "fmla v25.4s, v6.4s, v1.s[1] \n" "fmla v26.4s, v6.4s, v1.s[2] \n" "fmla v27.4s, v6.4s, v1.s[3] \n" "fmla v28.4s, v6.4s, v2.s[0] \n" "fmla v29.4s, v6.4s, v2.s[1] \n" "fmla v30.4s, v6.4s, v2.s[2] \n" "fmla v31.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v5.4s, v0.s[0] \n" "fmla v13.4s, v5.4s, v0.s[1] \n" "fmla v14.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "fmla v16.4s, v5.4s, v1.s[0] \n" 
"fmla v17.4s, v5.4s, v1.s[1] \n" "fmla v18.4s, v5.4s, v1.s[2] \n" "fmla v19.4s, v5.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n"// w0123_0 "prfm pldl1keep, [%5, #128] \n" "ld1 {v6.4s, v7.4s}, [%5], #32 \n"// w0123_1 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v6.4s, v2.s[0] \n" "fmla v21.4s, v6.4s, v2.s[1] \n" "fmla v22.4s, v6.4s, v2.s[2] \n" "fmla v23.4s, v6.4s, v2.s[3] \n" "fmla v24.4s, v6.4s, v3.s[0] \n" "fmla v25.4s, v6.4s, v3.s[1] \n" "fmla v26.4s, v6.4s, v3.s[2] \n" "fmla v27.4s, v6.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v6.4s, v0.s[0] \n" "fmla v29.4s, v6.4s, v0.s[1] \n" "fmla v30.4s, v6.4s, v0.s[2] \n" "fmla v31.4s, v6.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "fmla v12.4s, v5.4s, v2.s[0] \n" "fmla v13.4s, v5.4s, v2.s[1] \n" "fmla v14.4s, v5.4s, v2.s[2] \n" "fmla v15.4s, v5.4s, v2.s[3] \n" "fmla v16.4s, v5.4s, v3.s[0] \n" "fmla v17.4s, v5.4s, v3.s[1] \n" "fmla v18.4s, v5.4s, v3.s[2] \n" "fmla v19.4s, v5.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] 
\n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0), // %4 "=r"(k1) // %5 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0), "5"(k1) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+7<tiles; i+=8) { const float* r0 = bb2.row(i/12 + (i%12)/8); const float* k0 = kernel0_tm.row(r); const float* k1 = kernel1_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0123_0 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" 
"fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%5], #64 \n"// w0123_1 "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "fmla v24.4s, v12.4s, v0.s[0] \n" "fmla v25.4s, v12.4s, v1.s[0] \n" "fmla v26.4s, v12.4s, v2.s[0] \n" "fmla v27.4s, v12.4s, v3.s[0] \n" "fmla v28.4s, v12.4s, v4.s[0] \n" "fmla v29.4s, v12.4s, v5.s[0] \n" "fmla v30.4s, v12.4s, v6.s[0] \n" "fmla v31.4s, v12.4s, v7.s[0] \n" "fmla v24.4s, v13.4s, v0.s[1] \n" "fmla v25.4s, v13.4s, v1.s[1] \n" "fmla v26.4s, v13.4s, v2.s[1] \n" "fmla v27.4s, v13.4s, v3.s[1] \n" "fmla v28.4s, v13.4s, v4.s[1] \n" "fmla v29.4s, v13.4s, v5.s[1] \n" "fmla v30.4s, v13.4s, v6.s[1] \n" "fmla v31.4s, v13.4s, v7.s[1] \n" "fmla v24.4s, v14.4s, v0.s[2] \n" "fmla v25.4s, v14.4s, v1.s[2] \n" "fmla v26.4s, v14.4s, v2.s[2] \n" "fmla v27.4s, v14.4s, v3.s[2] \n" "fmla v28.4s, v14.4s, v4.s[2] \n" "fmla v29.4s, v14.4s, v5.s[2] \n" "fmla v30.4s, v14.4s, v6.s[2] \n" "fmla v31.4s, v14.4s, v7.s[2] \n" "subs %w0, %w0, #1 
\n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0), // %4 "=r"(k1) // %5 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0), "5"(k1) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<tiles; i+=4) { const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4); const float* k0 = kernel0_tm.row(r); const float* k1 = kernel1_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0123_0 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%5], #64 \n"// w0123_1 "fmla v20.4s, v12.4s, v0.s[0] \n" "fmla v21.4s, v12.4s, v1.s[0] \n" "fmla v22.4s, v12.4s, v2.s[0] \n" "fmla v23.4s, v12.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla 
v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v13.4s, v0.s[1] \n" "fmla v21.4s, v13.4s, v1.s[1] \n" "fmla v22.4s, v13.4s, v2.s[1] \n" "fmla v23.4s, v13.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v14.4s, v0.s[2] \n" "fmla v21.4s, v14.4s, v1.s[2] \n" "fmla v22.4s, v14.4s, v2.s[2] \n" "fmla v23.4s, v14.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0), // %4 "=r"(k1) // %5 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0), "5"(k1) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i+1<tiles; i+=2) { const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2); const float* k0 = kernel0_tm.row(r); const float* k1 = kernel1_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0123_0 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%5], #64 \n"// w0123_1 "fmla v18.4s, v12.4s, v0.s[0] \n" "fmla v19.4s, v12.4s, 
v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v13.4s, v0.s[1] \n" "fmla v19.4s, v13.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v14.4s, v0.s[2] \n" "fmla v19.4s, v14.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0), // %4 "=r"(k1) // %5 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0), "5"(k1) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } for (; i<tiles; i++) { const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2 + i%2); const float* k0 = kernel0_tm.row(r); const float* k1 = kernel1_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n"// r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n"// w0123_0 "fmla v16.4s, v8.4s, v0.s[0] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%5], #64 \n"// w0123_1 "fmla v17.4s, v12.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v13.4s, v0.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v14.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k0), // %4 "=r"(k1) // %5 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k0), "5"(k1) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", 
"v16", "v17" ); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r=0; r<64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i=0; #if __aarch64__ for (; i+11<tiles; i+=12) { const float* r0 = bb2.row(i/12); const float* k0 = kernel0_tm.row(r); int nn = inch;// inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" 
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif for (; i+7<tiles; i+=8) { #if __aarch64__ const float* r0 = bb2.row(i/12 + (i%12)/8); #else const float* r0 = bb2.row(i/8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 
r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" 
"vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif } for (; i+3<tiles; i+=4) { #if __aarch64__ const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4); #else const float* r0 = bb2.row(i/8 + (i%8)/4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla 
v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19" ); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif } for (; i+1<tiles; i+=2) { #if __aarch64__ const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2); #else const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], 
#32 \n"// r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17" ); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9" ); #endif } for (; i<tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i/12 + (i%12)/8 + (i%8)/4 + (i%4)/2 + i%2); #else const float* r0 = bb2.row(i/8 + (i%8)/4 + (i%4)/2 + i%2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n"// r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n"// w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16" ); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, elemsize, elempack); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm/8 * h_tm/8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p<outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? 
vld1q_f32( (const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i=0; i<outh/6; i++) { for (int j=0; j<outw/6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm/8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; float* output0 = out0.row(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m=0; m<8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = 
vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m=0; m<6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06); float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; 
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f))); float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1q_f32(output0, _out00); vst1q_f32(output0 + 8, _out02); vst1q_f32(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f))); vst1q_f32(output0 + 4, _out01); vst1q_f32(output0 + 12, _out03); vst1q_f32(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
main.c
//
// Created by Ramirez, Roger on 17/09/20.
// %cflags: -fopenmp
//
// Fork a team of 8 OpenMP threads; each thread prints a two-part
// greeting tagged with its own thread id.  Because both printf calls
// run concurrently across threads, the interleaving of the output is
// nondeterministic from run to run.

#include <stdio.h>
#include <omp.h>

// Returns 0 on success (C requires main to return int; `void main` is
// non-standard and was the defect in the original).
int main(void)
{
#pragma omp parallel num_threads(8)
    {
        int id = omp_get_thread_num();
        printf("hello (%d)", id);
        printf("world (%d) \n", id);
    }
    return 0;
}
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/opencl-private.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" #ifdef MAGICKCORE_CLPERFMARKER #include "CLPerfMarker.h" #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. 
We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveBlurImageChannel(const Image *image,
%        const ChannelType channel,double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Convenience wrapper: adaptively blur all default channels.
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(blur_image);
}

MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma degenerates to the identity; return the clone as-is.
  */
  if (fabs(sigma) <= MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge map later selects how wide a blur kernel each pixel
    receives (strong edges -> narrow kernel, flat areas -> wide kernel).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even slots kernel[0], kernel[2], ... are populated; slot i holds a
    normalized Gaussian of support (width-i) x (width-i).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Fold any residual weight into the center tap so the kernel sums to 1.
    */
    kernel[i][(k-1)/2]+=(1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A kernel allocation failed: release the slots already acquired.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      /*
        NOTE(review): this pointer k shadows the outer ssize_t k declared
        above; likewise the inner i, u, v shadow the outer loop variables.
      */
      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        Select a kernel from the pixel's edge intensity: stronger edges pick
        a larger i, i.e. a smaller (width-i) support.  i is clamped to
        [0,width] and forced even to match the populated kernel slots.
      */
      i=(ssize_t) ceil((double) width*QuantumScale*
        GetPixelIntensity(edge_image,r)-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel[i];
      /*
        Convolve the (width-i) x (width-i) neighborhood, weighting color
        channels by alpha when the opacity channel participates.
      */
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /*
        Renormalize by the accumulated weight (safe reciprocal avoids
        division by a vanishing gamma).
      */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   S h a r p e n   I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges. We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *AdaptiveSharpenImageChannel(const Image *image,
%        const ChannelType channel,const double radius,const double sigma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: sharpen all default channels. */
  sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception);
  return(sharp_image);
}

MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    **kernel,
    normalize;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* A (near-)zero sigma is a no-op: return the unmodified clone. */
  if (fabs(sigma) <= MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sharp_image->exception);
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices of kernel[] are populated; i is forced even below
    before it is used to select a kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* Negative Gaussian lobe; center is patched below to sharpen. */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed part-way: release what was acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict r;

    register IndexPacket
      *magick_restrict sharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      double
        alpha,
        gamma;

      DoublePixelPacket
        pixel;

      register const double
        *magick_restrict k;

      register ssize_t
        i,
        u,
        v;

      gamma=0.0;
      /*
        The edge-image intensity at this pixel selects which precomputed
        kernel to use: strong edges pick a smaller i (larger kernel, more
        sharpening); flat areas pick a larger i (smaller kernel).
      */
      i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (i < 0)
        i=0;
      else
        if (i > (ssize_t) width)
          i=(ssize_t) width;
      /* Force i even: only even kernel[] slots were allocated above. */
      if ((i & 0x01) != 0)
        i--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
        (ssize_t) ((width-i)/2L),width-i,width-i,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      k=kernel[i];
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      for (v=0; v < (ssize_t) (width-i); v++)
      {
        for (u=0; u < (ssize_t) (width-i); u++)
        {
          alpha=1.0;
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          if ((channel & RedChannel) != 0)
            pixel.red+=(*k)*alpha*GetPixelRed(p);
          if ((channel & GreenChannel) != 0)
            pixel.green+=(*k)*alpha*GetPixelGreen(p);
          if ((channel & BlueChannel) != 0)
            pixel.blue+=(*k)*alpha*GetPixelBlue(p);
          if ((channel & OpacityChannel) != 0)
            pixel.opacity+=(*k)*GetPixelOpacity(p);
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
          gamma+=(*k)*alpha;
          k++;
          p++;
        }
      }
      /* Normalize by the alpha-weighted kernel sum (avoids divide-by-0). */
      gamma=PerceptibleReciprocal(gamma);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
      q++;
      r++;
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.  Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%      Image *BlurImageChannel(const Image *image,const ChannelType channel,
%        const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: blur all default channels. */
  blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(blur_image);
}

MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image = NULL;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL fast path: if the accelerator handled it, we are done. */
  blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /*
    Two chained 1-D "blur" kernels; the second carries a +90 angle, which
    presumably yields a separable 2-D Gaussian blur -- see the kernel-geometry
    grammar in AcquireKernelInfo().
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n v o l v e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvolveImage() applies a custom convolution kernel to the image.
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const size_t order,
%        const double *kernel,ExceptionInfo *exception)
%      Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
%        const size_t order,const double *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o order: the number of columns and rows in the filter kernel.
%
%    o kernel: An array of double representing the convolution kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /* Convenience wrapper: convolve all default channels. */
  convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel,
    exception);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(convolve_image);
}

MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
  Image
    *convolve_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  /*
    Wrap the caller's order x order array of doubles in a KernelInfo so it
    can be handed to the morphology convolve primitive.
  */
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=order;
  kernel_info->height=order;
  kernel_info->x=(ssize_t) (order-1)/2;
  kernel_info->y=(ssize_t) (order-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (order*order); i++)
    kernel_info->values[i]=kernel[i];
  convolve_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (convolve_image == (Image *) NULL)
    convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull(): one pass of the Crimmins hull algorithm along the direction
  (x_offset,y_offset).  polarity > 0 raises pixels that are at least 2
  (scaled) below the shifted neighbor; polarity <= 0 lowers pixels that are
  at least 2 above it.  f and g are (columns+2) x (rows+2) planes with a
  one-pixel border; results are written back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the top border row; r is p shifted by the hull direction. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    /* i indexes into the bordered plane: skip left/right border columns. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second sweep: compare against both shifted neighbors (r and s). */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    SignedQuantum
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  register ssize_t
    i;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  size_t
    length,
    number_channels;

  /* Hull sweep directions: S, E, SE, SW (and their negations below). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image, exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer: one channel plane with a one-pixel border.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time
    (0=red, 1=green, 2=blue, 3=opacity, 4=black for CMYK).
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    /* Skip the opacity plane when the image carries no matte channel. */
    if ((image->matte == MagickFalse) && (i == 3))
      continue;
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixels[j]=GetPixelRed(p); break;
          case 1: pixels[j]=GetPixelGreen(p); break;
          case 2: pixels[j]=GetPixelBlue(p); break;
          case 3: pixels[j]=GetPixelOpacity(p); break;
          case 4: pixels[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* Raise dark speckles, then complementarily lower bright ones. */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *magick_restrict indexes;

      register PixelPacket
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixels[j]); break;
          case 1: SetPixelGreen(q,pixels[j]); break;
          case 2: SetPixelBlue(q,pixels[j]); break;
          case 3: SetPixelOpacity(q,pixels[j]); break;
          case 4: SetPixelIndex(indexes+x,pixels[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Discrete Laplacian: all taps -1, center tap width*height-1, so the
    kernel sums to zero (flat regions map to black, edges stand out).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (edge_image == (Image *) NULL)
    edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,
      1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).  For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a signed Gaussian: negative lobe above/left of center, positive
    below/right; the u != k mask then zeroes everything off one diagonal,
    giving the directional "light from the corner" emboss kernel.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*
        exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the kernel taps sum to one (guarded against zero sum). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (emboss_image == (Image *) NULL)
    emboss_image=MorphologyImageChannel(image,DefaultChannels,
      ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImageChannel(emboss_image,(ChannelType) (AllChannels &~
      SyncChannels));
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F i l t e r I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FilterImage() applies a custom convolution kernel to the image.
%
%  The format of the FilterImage method is:
%
%      Image *FilterImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%      Image *FilterImageChannel(const Image *image,const ChannelType channel,
%        const KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel, ExceptionInfo *exception) { Image *filter_image; filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception); return(filter_image); } MagickExport Image *FilterImageChannel(const Image *image, const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception) { #define FilterImageTag "Filter/Image" CacheView *filter_view, *image_view; Image *filter_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType *filter_kernel; register ssize_t i; ssize_t y; #ifdef MAGICKCORE_CLPERFMARKER clBeginPerfMarkerAMD(__FUNCTION__,""); #endif /* Initialize filter image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((kernel->width % 2) == 0) ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber"); if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double) kernel->height); message=AcquireString(""); k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) kernel->width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } #if defined(MAGICKCORE_OPENCL_SUPPORT) filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception); if (filter_image != (Image *) NULL) { #ifdef 
MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(filter_image); } #endif filter_image=CloneImage(image,0,0,MagickTrue,exception); if (filter_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse) { InheritException(exception,&filter_image->exception); filter_image=DestroyImage(filter_image); return((Image *) NULL); } /* Normalize kernel. */ filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*filter_kernel))); if (filter_kernel == (MagickRealType *) NULL) { filter_image=DestroyImage(filter_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) filter_kernel[i]=(MagickRealType) kernel->values[i]; /* Filter image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); filter_view=AcquireAuthenticCacheView(filter_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,filter_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict filter_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y- (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width, kernel->height,exception); q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); 
filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket pixel; register const MagickRealType *magick_restrict k; register const PixelPacket *magick_restrict kernel_pixels; register ssize_t u; ssize_t v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=filter_kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.red+=(*k)*kernel_pixels[u].red; pixel.green+=(*k)*kernel_pixels[u].green; pixel.blue+=(*k)*kernel_pixels[u].blue; k++; } kernel_pixels+=image->columns+kernel->width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*kernel_pixels[u].opacity; k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *magick_restrict kernel_indexes; k=filter_kernel; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u); k++; } kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index)); } } else { double alpha, gamma; gamma=0.0; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- GetPixelOpacity(kernel_pixels+u))); 
pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u); gamma+=(*k)*alpha; k++; } kernel_pixels+=image->columns+kernel->width; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u); k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *magick_restrict kernel_indexes; k=filter_kernel; kernel_pixels=p; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u); k++; } kernel_pixels+=image->columns+kernel->width; kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } sync=SyncCacheViewAuthenticPixels(filter_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } filter_image->type=image->type; filter_view=DestroyCacheView(filter_view); image_view=DestroyCacheView(image_view); filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel); if (status == 
%      Image *GaussianBlurImage(const Image *image,const double radius,
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *GaussianBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); #endif if (blur_image == (Image *) NULL) blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. 
% % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % Image *MotionBlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static double *GetMotionBlurKernel(const size_t width,const double sigma) { double *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { Image *motion_blur; motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle, exception); return(motion_blur); } MagickExport Image *MotionBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; OffsetInfo *offset; 
PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset, exception); if (blur_image != (Image *) NULL) return blur_image; #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket qixel; PixelPacket pixel; register const IndexPacket *magick_restrict indexes; register double *magick_restrict k; register ssize_t i; k=kernel; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); qixel.red+=(*k)*pixel.red; qixel.green+=(*k)*pixel.green; qixel.blue+=(*k)*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if 
(image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*(*indexes); } k++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index)); } else { double alpha, gamma; alpha=0.0; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)); qixel.red+=(*k)*alpha*pixel.red; qixel.green+=(*k)*alpha*pixel.green; qixel.blue+=(*k)*alpha*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*alpha*GetPixelIndex(indexes); } gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double width, % const double sigma,ExceptionInfo *exception) % Image *KuwaharaImageChannel(const Image *image,const ChannelType channel, % const double width,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *kuwahara_image; kuwahara_image=KuwaharaImageChannel(image,DefaultChannels,radius,sigma, exception); return(kuwahara_image); } MagickExport Image *KuwaharaImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define KuwaharaImageTag "Kiwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) channel; width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,0,0,MagickTrue,exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse) { InheritException(exception,&kuwahara_image->exception); gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1) #endif for (y=0; y < (ssize_t) kuwahara_image->rows; y++) { register IndexPacket *magick_restrict kuwahara_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view); for (x=0; x < (ssize_t) kuwahara_image->columns; x++) { double min_variance; MagickPixelPacket pixel; RectangleInfo quadrant, target; register ssize_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const PixelPacket 
*magick_restrict p; double variance; MagickPixelPacket mean; register const PixelPacket *magick_restrict k; register ssize_t n; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const PixelPacket *) NULL) break; GetMagickPixelPacket(image,&mean); k=p; for (n=0; n < (ssize_t) (width*width); n++) { mean.red+=(double) k->red; mean.green+=(double) k->green; mean.blue+=(double) k->blue; k++; } mean.red/=(double) (width*width); mean.green/=(double) (width*width); mean.blue/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(image,k); variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean)); k++; } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } status=InterpolateMagickPixelPacket(gaussian_image,image_view, UndefinedInterpolatePixel,(double) target.x+target.width/2.0, (double) target.y+target.height/2.0,&pixel,exception); if (status == MagickFalse) break; SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x); q++; } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); return(kuwahara_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,      /* luma after the vertical blur pass, with padding */
    *scanLinePixels,  /* per-thread scratch scanline buffers */
    totalWeight;      /* normalization for the separable weight window */

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,  /* padded scanline length (max dimension + 2*width) */
    width;         /* blur half-width derived from 'radius' percentage */

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the accelerated (OpenCL) implementation when available. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&contrast_image->exception);
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /* radius is a percentage: 100% => 0.2 * largest dimension. */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One scratch scanline per OpenMP thread. */
  scanLinePixels_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /*
    NOTE(review): presumably a triangle-weight window normalization; the
    accumulation loops below skip index i == width, so confirm that
    (width+1)^2 matches the weights actually summed.
  */
  totalWeight=(width+1)*(width+1);
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Select this thread's scratch scanline. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column with 'width' rows of virtual padding above/below. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Reduce the column to luma. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p++;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Ramp the weights up, then down, across the window. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
        exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Copy this row of the vertically-blurred luma (with padding). */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /* Unsharp-mask style gain: scale RGB by (src + strength*(src-blur))/src. */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(q,ClampToQuantum(GetPixelRed(p)*mult));
        SetPixelGreen(q,ClampToQuantum(GetPixelGreen(p)*mult));
        SetPixelBlue(q,ClampToQuantum(GetPixelBlue(p)*mult));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MaxTextExtent],
    label[MaxTextExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Parameter ramps: each tile i applies the operation a bit more strongly. */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    if (i == (NumberTiles/2))
      {
        /* Center tile: the untouched original, highlighted. */
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /* Despeckle i times by repeatedly replacing the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): there is no case 4, so i==4 falls through to the
          default (magick set to "NULL"); also the preview is rendered with
          StatisticImage rather than a noise operator — verify intended.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to 'thumbnail', not to the
          clone in 'preview_image' — confirm this is the intended target.
        */
        (void) BilevelImage(thumbnail,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        /* Round-trip through a temporary JPEG to show quality loss. */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double)
              ((MagickOffsetType) GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Advance the per-tile parameter ramps. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a rotational blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%    Image *RotationalBlurImageChannel(const Image *image,
%      const ChannelType channel,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o angle: the angle of the rotational blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/

MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: apply the rotational blur to the default channels. */
  blur_image=RotationalBlurImageChannel(image,DefaultChannels,angle,exception);
  return(blur_image);
}

MagickExport Image *RotationalBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it succeeds. */
  blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  /*
    n is the number of samples taken along the blur arc; it grows with the
    angle and with the square root of the blur radius (the +2UL keeps n >= 2
    so the theta division below is safe).
  */
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
  cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (MagickRealType *) NULL) ||
      (sin_theta == (MagickRealType *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the rotation table, centered on half the total arc. */
  offset=theta*(MagickRealType) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      MagickPixelPacket
        qixel;

      MagickRealType
        normalize,
        radius;

      PixelPacket
        pixel;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /*
        Pixels near the center traverse a shorter arc: stride through the
        precomputed trig tables so roughly the same arc length is sampled.
      */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      normalize=0.0;
      qixel=bias;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* Opaque path: simple average of the samples along the arc. */
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            qixel.red+=pixel.red;
            qixel.green+=pixel.green;
            qixel.blue+=pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=(*indexes);
              }
            normalize+=1.0;
          }
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
        }
      else
        {
          double
            alpha,
            gamma;

          /*
            Matte path: weight each color sample by its alpha so transparent
            pixels do not darken the result; opacity itself stays unweighted.
          */
          alpha=1.0;
          gamma=0.0;
          for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
          {
            (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
              (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
              (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
              cos_theta[i]+0.5),&pixel,exception);
            alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
            qixel.red+=alpha*pixel.red;
            qixel.green+=alpha*pixel.green;
            qixel.blue+=alpha*pixel.blue;
            qixel.opacity+=pixel.opacity;
            if (image->colorspace == CMYKColorspace)
              {
                indexes=GetCacheViewVirtualIndexQueue(image_view);
                qixel.index+=alpha*(*indexes);
              }
            gamma+=alpha;
            normalize+=1.0;
          }
          gamma=PerceptibleReciprocal(gamma);
          normalize=PerceptibleReciprocal(normalize);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress++ is shared across parallel iterations with
          no critical section here — confirm against the other effect methods
          whether an omp critical is expected.
        */
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
  sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%    Image *SelectiveBlurImage(const Image *image,const double radius,
%      const double sigma,const double threshold,ExceptionInfo *exception)
%    Image *SelectiveBlurImageChannel(const Image *image,
%      const ChannelType channel,const double radius,const double sigma,
%      const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  Image
    *blur_image;

  /* Convenience wrapper: selectively blur the default channels. */
  blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception);
  return(blur_image);
}

MagickExport Image *SelectiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  double
    *kernel;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Build a width x width Gaussian kernel for the selective blur. */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    width*sizeof(*kernel)));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel weights, one row per log line, for debugging.
      */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The contrast test below operates on a grayscale copy of the image.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace);
  if (status == MagickFalse)
    {
      InheritException(exception,&luminance_image->exception);
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      luminance_image=DestroyImage(luminance_image);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Offset of the center pixel within the (columns+width) x width neighborhood
    fetched for each row below.
  */
  center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L));
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      gamma;

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict l,
      *magick_restrict p;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (l == (const PixelPacket *) NULL) ||
        (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        contrast;

      DoublePixelPacket
        pixel;

      MagickRealType
        intensity;

      register const double
        *magick_restrict k;

      register ssize_t
        u;

      ssize_t
        j,
        v;

      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel;
      intensity=GetPixelIntensity(image,p+center);
      gamma=0.0;
      j=0;
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Opaque path: accumulate only neighbors whose luminance contrast
            with the center pixel is below the threshold; gamma renormalizes
            for the excluded taps.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  pixel.red+=(*k)*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*GetPixelBlue(p+u+j);
                  gamma+=(*k);
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /*
                NOTE(review): k is not reset to the kernel start before this
                second pass — it continues from where the color pass left off;
                verify against upstream whether a k=kernel reset is intended.
              */
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      else
        {
          MagickRealType
            alpha;

          /*
            Matte path: alpha-weight the color taps so transparent neighbors
            contribute proportionally less.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j));
                  pixel.red+=(*k)*alpha*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j);
                  pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  gamma+=(*k)*alpha;
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /* See NOTE(review) above: k continues without a reset here. */
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      alpha=(MagickRealType) (QuantumScale*
                        GetPixelAlpha(p+u+j));
                      pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      p++;
      l++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(double *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%    Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%      const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation: Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict s0,
      *magick_restrict s1,
      *magick_restrict s2;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row band, one pixel wider on each side, for the 3x3 normal. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      /*
        Determine the surface normal and compute shading.
      */
      /*
        s0/s1/s2 point at the rows above, at, and below the current pixel.
        NOTE(review): the row stride uses image->columns although the pixels
        were fetched from linear_image — identical geometry here since
        linear_image is a clone of image; confirm if that ever changes.
      */
      s0=p+1;
      s1=s0+image->columns+2;
      s2=s1+image->columns+2;
      normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+
        GetPixelIntensity(linear_image,s1-1)+
        GetPixelIntensity(linear_image,s2-1)-
        GetPixelIntensity(linear_image,s0+1)-
        GetPixelIntensity(linear_image,s1+1)-
        GetPixelIntensity(linear_image,s2+1));
      normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+
        GetPixelIntensity(linear_image,s2)+
        GetPixelIntensity(linear_image,s2+1)-
        GetPixelIntensity(linear_image,s0-1)-
        GetPixelIntensity(linear_image,s0)-
        GetPixelIntensity(linear_image,s0+1));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the light's Z component */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
                normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          SetPixelRed(q,shade);
          SetPixelGreen(q,shade);
          SetPixelBlue(q,shade);
        }
      else
        {
          SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
          SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
          SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
        }
      q->opacity=s1->opacity;
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%    Image *SharpenImageChannel(const Image *image,const ChannelType channel,
%      const double radius,const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *sharp_image;

  /* Convenience wrapper: sharpen the default channels. */
  sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception);
  return(sharp_image);
}

MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the sharpening kernel: a negative Gaussian surround whose center
    weight (-2.0 * sum of surround) makes the kernel sum positive, then
    rescale so the kernel has unit gain.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  /* Delegate the convolution itself to the morphology machinery. */
  sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a block defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%    Image *SpreadImage(const Image *image,const double radius,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: Choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per thread so the parallel loop needs no locking. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      PointInfo
        point;

      /*
        Sample the source at a random offset within +/- width/2 of (x,y),
        interpolating between neighboring pixels.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolateMagickPixelPacket(image,image_view,image->interpolate,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel,
        exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%    Image *UnsharpMaskImage(const Image *image,const double radius,
%      const double sigma,const double amount,const double threshold,
%      ExceptionInfo *exception)
%    Image *UnsharpMaskImageChannel(const Image *image,
%      const ChannelType channel,const double radius,const double sigma,
%      const double gain,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  UnsharpMaskImage() sharpens all channels; thin wrapper around
  UnsharpMaskImageChannel() with DefaultChannels.
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
  Image
    *sharp_image;

  sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
    threshold,exception);
  return(sharp_image);
}

/*
  UnsharpMaskImageChannel() sharpens the selected channels: blur the image,
  then wherever the (original - blur) difference exceeds the threshold, add
  `gain` times that difference back onto the original value.

  NOTE(review): this revision makes the destination-pixel reads uniform — the
  original mixed GetPixelRed(q) with raw q->green / q->blue / q->opacity; the
  GetPixel* macros expand to the same member access, so behavior is unchanged.
  A (MagickRealType) cast was also added on the opacity update for consistency
  with the other channels (no value change; the promotion already occurred).
*/
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated path first; fall through to the CPU path when
    it returns NULL.
  */
  unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
    threshold,exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
    radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Scale the caller's threshold into quantum units once, outside the loop.
  */
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    DoublePixelPacket
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict unsharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel.red=bias.red;
    pixel.green=bias.green;
    pixel.blue=bias.blue;
    pixel.opacity=bias.opacity;
    pixel.index=bias.index;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Per channel: pixel.<c> first holds the original-minus-blur difference;
        below the threshold the original value is kept, otherwise the
        amplified difference is added back to the original.
      */
      if ((channel & RedChannel) != 0)
        {
          pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetPixelRed(p);
          else
            pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
          SetPixelRed(q,ClampToQuantum(pixel.red));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetPixelGreen(p);
          else
            pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
          SetPixelGreen(q,ClampToQuantum(pixel.green));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetPixelBlue(p);
          else
            pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q);
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=(MagickRealType) GetPixelOpacity(p)+
              (pixel.opacity*gain);
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
            GetPixelIndex(unsharp_indexes+x);
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
          else
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
              (pixel.index*gain);
          SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
conv1x1s2_neon.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "option.h"
#include "mat.h"

namespace ncnn {

// 1x1 convolution with stride 2, fp32.
//
// For every output channel p: top(p) = bias[p] + sum over input channels q of
// kernel[p*inch + q] * bottom(q), sampling bottom at every second column and
// every second row.  The weight layout is therefore outch x inch, row-major
// (see the kernel0..kernel3 index expressions below).
//
// Strategy: output channels are blocked four at a time (nn_outch), and within
// that, input channels are consumed four at a time, so each pass over a row
// performs a 4x4 channel tile of multiply-accumulates.  The NEON paths use
// ld2/vld2 de-interleaving loads so the even-column (stride-2) samples land in
// one register group.  Scalar tails handle outw % 8 pixels and leftover
// channels.  Accumulation is in-place on the output row (initialized with the
// bias via fill()), so each q-pass adds its contribution.
static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After producing one output row we consumed 2*outw input columns of the
    // current row; (w - 2*outw) moves to the end of that row and the extra
    // + w skips the next input row entirely (stride 2 vertically).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    // Main block: four output channels per iteration.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);
        Mat out2 = top_blob.channel(p+2);
        Mat out3 = top_blob.channel(p+3);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        const float bias2 = bias ? bias[p+2] : 0.f;
        const float bias3 = bias ? bias[p+3] : 0.f;

        // Seed the accumulators with the bias; every q-pass below adds onto it.
        out0.fill(bias0);
        out1.fill(bias1);
        out2.fill(bias2);
        out3.fill(bias3);

        int q = 0;

        // Consume four input channels per pass (4x4 channel tile).
        for (; q+3<inch; q+=4)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr2 = out2;
            float* outptr3 = out3;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            // Weight rows for the four output channels, starting at input
            // channel q: kernel is indexed [p*inch + q] (outch x inch layout).
            const float* kernel0 = kernel + p*inch + q;
            const float* kernel1 = kernel + (p+1)*inch + q;
            const float* kernel2 = kernel + (p+2)*inch + q;
            const float* kernel3 = kernel + (p+3)*inch + q;

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            for (int i = 0; i < outh; i++)
            {
                int size = outw;
#if __ARM_NEON
                int nn = size >> 3;     // 8 output pixels per asm iteration
                int remain = size & 7;  // scalar tail
#else
                int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
                // Each _kN holds the 4 weights (for input channels q..q+3) of
                // one output channel; lanes are selected in the asm below.
                float32x4_t _k0 = vld1q_f32(kernel0);
                float32x4_t _k1 = vld1q_f32(kernel1);
                float32x4_t _k2 = vld1q_f32(kernel2);
                float32x4_t _k3 = vld1q_f32(kernel3);
#if __aarch64__
                if (nn > 0)
                {
                // ld2 de-interleaves even/odd columns: the even-lane registers
                // carry the stride-2 samples; the odd lanes are discarded.
                asm volatile(
                    "0: \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld2 {v4.4s, v5.4s}, [%5], #32 \n"
                    "ld2 {v6.4s, v7.4s}, [%5], #32 \n"
                    "and v5.16b, v6.16b, v6.16b \n"// v4 v5
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v8.4s, v9.4s}, [%1] \n"
                    "fmla v8.4s, v4.4s, %18.s[0] \n"
                    "fmla v9.4s, v5.4s, %18.s[0] \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%2] \n"
                    "fmla v10.4s, v4.4s, %19.s[0] \n"
                    "fmla v11.4s, v5.4s, %19.s[0] \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v12.4s, v13.4s}, [%3] \n"
                    "fmla v12.4s, v4.4s, %20.s[0] \n"
                    "fmla v13.4s, v5.4s, %20.s[0] \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v14.4s, v15.4s}, [%4] \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld2 {v6.4s, v7.4s}, [%6], #32 \n"
                    "fmla v14.4s, v4.4s, %21.s[0] \n"
                    "fmla v15.4s, v5.4s, %21.s[0] \n"
                    "ld2 {v4.4s, v5.4s}, [%6], #32 \n"
                    "and v7.16b, v4.16b, v4.16b \n"// v6 v7
                    "fmla v8.4s, v6.4s, %18.s[1] \n"
                    "fmla v9.4s, v7.4s, %18.s[1] \n"
                    "fmla v10.4s, v6.4s, %19.s[1] \n"
                    "fmla v11.4s, v7.4s, %19.s[1] \n"
                    "fmla v12.4s, v6.4s, %20.s[1] \n"
                    "fmla v13.4s, v7.4s, %20.s[1] \n"
                    "prfm pldl1keep, [%7, #512] \n"
                    "ld2 {v4.4s, v5.4s}, [%7], #32 \n"
                    "fmla v14.4s, v6.4s, %21.s[1] \n"
                    "fmla v15.4s, v7.4s, %21.s[1] \n"
                    "ld2 {v6.4s, v7.4s}, [%7], #32 \n"
                    "and v5.16b, v6.16b, v6.16b \n"// v4 v5
                    "fmla v8.4s, v4.4s, %18.s[2] \n"
                    "fmla v9.4s, v5.4s, %18.s[2] \n"
                    "fmla v10.4s, v4.4s, %19.s[2] \n"
                    "fmla v11.4s, v5.4s, %19.s[2] \n"
                    "fmla v12.4s, v4.4s, %20.s[2] \n"
                    "fmla v13.4s, v5.4s, %20.s[2] \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld2 {v6.4s, v7.4s}, [%8], #32 \n"
                    "fmla v14.4s, v4.4s, %21.s[2] \n"
                    "fmla v15.4s, v5.4s, %21.s[2] \n"
                    "ld2 {v4.4s, v5.4s}, [%8], #32 \n"
                    "and v7.16b, v4.16b, v4.16b \n"// v6 v7
                    "fmla v8.4s, v6.4s, %18.s[3] \n"
                    "fmla v9.4s, v7.4s, %18.s[3] \n"
                    "fmla v10.4s, v6.4s, %19.s[3] \n"
                    "fmla v11.4s, v7.4s, %19.s[3] \n"
                    "st1 {v8.4s, v9.4s}, [%1], #32 \n"
                    "fmla v12.4s, v6.4s, %20.s[3] \n"
                    "fmla v13.4s, v7.4s, %20.s[3] \n"
                    "st1 {v10.4s, v11.4s}, [%2], #32 \n"
                    "fmla v14.4s, v6.4s, %21.s[3] \n"
                    "fmla v15.4s, v7.4s, %21.s[3] \n"
                    "st1 {v12.4s, v13.4s}, [%3], #32 \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v14.4s, v15.4s}, [%4], #32 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0), // %5
                    "=r"(r1), // %6
                    "=r"(r2), // %7
                    "=r"(r3) // %8
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "6"(r1),
                    "7"(r2),
                    "8"(r3),
                    "w"(_k0), // %18
                    "w"(_k1), // %19
                    "w"(_k2), // %20
                    "w"(_k3) // %21
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // ARMv7 variant of the same 4x4 tile; %eN[k]/%fN[k] select the
                // low/high half lanes of the weight quad registers.
                asm volatile(
                    "0: \n"
                    "pld [%5, #512] \n"
                    "vld2.f32 {d8-d11}, [%5]! \n"
                    "vld2.f32 {d12-d15}, [%5]! \n"
                    "vand q5, q6, q6 \n"// q4 q5
                    "pld [%1, #256] \n"
                    "vld1.f32 {d16-d19}, [%1] \n"
                    "vmla.f32 q8, q4, %e18[0] \n"
                    "vmla.f32 q9, q5, %e18[0] \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d20-d23}, [%2] \n"
                    "vmla.f32 q10, q4, %e19[0] \n"
                    "vmla.f32 q11, q5, %e19[0] \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d24-d27}, [%3] \n"
                    "vmla.f32 q12, q4, %e20[0] \n"
                    "vmla.f32 q13, q5, %e20[0] \n"
                    "pld [%4, #256] \n"
                    "vld1.f32 {d28-d31}, [%4] \n"
                    "pld [%6, #512] \n"
                    "vld2.f32 {d12-d15}, [%6]! \n"
                    "vmla.f32 q14, q4, %e21[0] \n"
                    "vmla.f32 q15, q5, %e21[0] \n"
                    "vld2.f32 {d8-d11}, [%6]! \n"
                    "vand q7, q4, q4 \n"// q6 q7
                    "vmla.f32 q8, q6, %e18[1] \n"
                    "vmla.f32 q9, q7, %e18[1] \n"
                    "vmla.f32 q10, q6, %e19[1] \n"
                    "vmla.f32 q11, q7, %e19[1] \n"
                    "vmla.f32 q12, q6, %e20[1] \n"
                    "vmla.f32 q13, q7, %e20[1] \n"
                    "pld [%7, #512] \n"
                    "vld2.f32 {d8-d11}, [%7]! \n"
                    "vmla.f32 q14, q6, %e21[1] \n"
                    "vmla.f32 q15, q7, %e21[1] \n"
                    "vld2.f32 {d12-d15}, [%7]! \n"
                    "vand q5, q6, q6 \n"// q4 q5
                    "vmla.f32 q8, q4, %f18[0] \n"
                    "vmla.f32 q9, q5, %f18[0] \n"
                    "vmla.f32 q10, q4, %f19[0] \n"
                    "vmla.f32 q11, q5, %f19[0] \n"
                    "vmla.f32 q12, q4, %f20[0] \n"
                    "vmla.f32 q13, q5, %f20[0] \n"
                    "pld [%8, #512] \n"
                    "vld2.f32 {d12-d15}, [%8]! \n"
                    "vmla.f32 q14, q4, %f21[0] \n"
                    "vmla.f32 q15, q5, %f21[0] \n"
                    "vld2.f32 {d8-d11}, [%8]! \n"
                    "vand q7, q4, q4 \n"// q6 q7
                    "vmla.f32 q8, q6, %f18[1] \n"
                    "vmla.f32 q9, q7, %f18[1] \n"
                    "vmla.f32 q10, q6, %f19[1] \n"
                    "vmla.f32 q11, q7, %f19[1] \n"
                    "vst1.f32 {d16-d19}, [%1]! \n"
                    "vmla.f32 q12, q6, %f20[1] \n"
                    "vmla.f32 q13, q7, %f20[1] \n"
                    "vst1.f32 {d20-d23}, [%2]! \n"
                    "vmla.f32 q14, q6, %f21[1] \n"
                    "vmla.f32 q15, q7, %f21[1] \n"
                    "vst1.f32 {d24-d27}, [%3]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d28-d31}, [%4]! \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0), // %5
                    "=r"(r1), // %6
                    "=r"(r2), // %7
                    "=r"(r3) // %8
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "6"(r1),
                    "7"(r2),
                    "8"(r3),
                    "w"(_k0), // %18
                    "w"(_k1), // %19
                    "w"(_k2), // %20
                    "w"(_k3) // %21
                    : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output pixel at a time; r* advance by 2
                // (stride-2 sampling).
                for (; remain>0; remain--)
                {
                    // TODO neon optimize
                    float sum0 = *r0 * kernel0[0] + *r1 * kernel0[1] + *r2 * kernel0[2] + *r3 * kernel0[3];
                    float sum1 = *r0 * kernel1[0] + *r1 * kernel1[1] + *r2 * kernel1[2] + *r3 * kernel1[3];
                    float sum2 = *r0 * kernel2[0] + *r1 * kernel2[1] + *r2 * kernel2[2] + *r3 * kernel2[3];
                    float sum3 = *r0 * kernel3[0] + *r1 * kernel3[1] + *r2 * kernel3[2] + *r3 * kernel3[3];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr2 += sum2;
                    *outptr3 += sum3;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }

        // Leftover input channels (inch % 4), one at a time, still feeding
        // the same four output channels.
        for (; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;
            float* outptr2 = out2;
            float* outptr3 = out3;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float* kernel1 = kernel + (p+1)*inch + q;
            const float* kernel2 = kernel + (p+2)*inch + q;
            const float* kernel3 = kernel + (p+3)*inch + q;

            const float k0 = kernel0[0];
            const float k1 = kernel1[0];
            const float k2 = kernel2[0];
            const float k3 = kernel3[0];

            const float* r0 = img0;

            for (int i = 0; i < outh; i++)
            {
                int size = outw;
#if __ARM_NEON
                int nn = size >> 3;
                int remain = size & 7;
#else
                int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
                // One scalar weight per output channel, broadcast to a quad.
                float32x4_t _k0 = vdupq_n_f32(k0);
                float32x4_t _k1 = vdupq_n_f32(k1);
                float32x4_t _k2 = vdupq_n_f32(k2);
                float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "0: \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld2 {v4.4s, v5.4s}, [%5], #32 \n"
                    "ld2 {v6.4s, v7.4s}, [%5], #32 \n"
                    "and v5.16b, v6.16b, v6.16b \n"
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v8.4s, v9.4s}, [%1] \n"
                    "fmla v8.4s, v4.4s, %12.4s \n"
                    "fmla v9.4s, v5.4s, %12.4s \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%2] \n"
                    "fmla v10.4s, v4.4s, %13.4s \n"
                    "fmla v11.4s, v5.4s, %13.4s \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v12.4s, v13.4s}, [%3] \n"
                    "st1 {v8.4s, v9.4s}, [%1], #32 \n"
                    "fmla v12.4s, v4.4s, %14.4s \n"
                    "fmla v13.4s, v5.4s, %14.4s \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v14.4s, v15.4s}, [%4] \n"
                    "st1 {v10.4s, v11.4s}, [%2], #32 \n"
                    "fmla v14.4s, v4.4s, %15.4s \n"
                    "fmla v15.4s, v5.4s, %15.4s \n"
                    "st1 {v12.4s, v13.4s}, [%3], #32 \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v14.4s, v15.4s}, [%4], #32 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0) // %5
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "w"(_k0), // %12
                    "w"(_k1), // %13
                    "w"(_k2), // %14
                    "w"(_k3) // %15
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "0: \n"
                    "pld [%5, #512] \n"
                    "vld2.f32 {d8-d11}, [%5]! \n"
                    "vld2.f32 {d12-d15}, [%5]! \n"
                    "vand q5, q6, q6 \n"// q4 q5
                    "pld [%1, #256] \n"
                    "vld1.f32 {d16-d19}, [%1] \n"
                    "vmla.f32 q8, q4, %q12 \n"
                    "vmla.f32 q9, q5, %q12 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d20-d23}, [%2] \n"
                    "vmla.f32 q10, q4, %q13 \n"
                    "vmla.f32 q11, q5, %q13 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d24-d27}, [%3] \n"
                    "vst1.f32 {d16-d19}, [%1]! \n"
                    "vmla.f32 q12, q4, %q14 \n"
                    "vmla.f32 q13, q5, %q14 \n"
                    "pld [%4, #256] \n"
                    "vld1.f32 {d28-d31}, [%4] \n"
                    "vst1.f32 {d20-d23}, [%2]! \n"
                    "vmla.f32 q14, q4, %q15 \n"
                    "vmla.f32 q15, q5, %q15 \n"
                    "vst1.f32 {d24-d27}, [%3]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d28-d31}, [%4]! \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr0),// %1
                    "=r"(outptr1),// %2
                    "=r"(outptr2),// %3
                    "=r"(outptr3),// %4
                    "=r"(r0) // %5
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr1),
                    "3"(outptr2),
                    "4"(outptr3),
                    "5"(r0),
                    "w"(_k0), // %12
                    "w"(_k1), // %13
                    "w"(_k2), // %14
                    "w"(_k3) // %15
                    : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    // TODO neon optimize
                    float sum0 = *r0 * k0;
                    float sum1 = *r0 * k1;
                    float sum2 = *r0 * k2;
                    float sum3 = *r0 * k3;

                    *outptr0 += sum0;
                    *outptr1 += sum1;
                    *outptr2 += sum2;
                    *outptr3 += sum3;

                    r0 += 2;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                    outptr3++;
                }

                r0 += tailstep;
            }
        }
    }

    // Remaining output channels (outch % 4), one at a time.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        int q = 0;

        // Four input channels per pass into a single output channel.
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];

            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
                float32x4_t _k1 = vdupq_n_f32(k1);
                float32x4_t _k2 = vdupq_n_f32(k2);
                float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
                if (nn > 0)
                {
                // Software-pipelined: the loads for the next iteration are
                // issued before the branch; the trailing "sub %2, %2, #64"
                // undoes the one extra prefetch-load advance of r0.
                asm volatile(
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                    "0: \n"
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v0.4s, v1.4s}, [%1] \n"
                    "fmla v0.4s, v2.4s, %12.4s \n"
                    "fmla v1.4s, v8.4s, %12.4s \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%3], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%3], #32 \n"
                    "fmla v0.4s, v2.4s, %13.4s \n"
                    "fmla v1.4s, v8.4s, %13.4s \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%4], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%4], #32 \n"
                    "fmla v0.4s, v2.4s, %14.4s \n"
                    "fmla v1.4s, v8.4s, %14.4s \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%5], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%5], #32 \n"
                    "fmla v0.4s, v2.4s, %15.4s \n"
                    "fmla v1.4s, v8.4s, %15.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v0.4s, v1.4s}, [%1], #32 \n"
                    "bne 0b \n"
                    "sub %2, %2, #64 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3) // %5
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k0), // %12
                    "w"(_k1), // %13
                    "w"(_k2), // %14
                    "w"(_k3) // %15
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld [%2, #512] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vld2.f32 {d16-d19}, [%2]! \n"
                    "0: \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d0-d3}, [%1] \n"
                    "vmla.f32 q0, q2, %q12 \n"
                    "vmla.f32 q1, q8, %q12 \n"
                    "pld [%3, #512] \n"
                    "vld2.f32 {d4-d7}, [%3]! \n"
                    "vld2.f32 {d16-d19}, [%3]! \n"
                    "vmla.f32 q0, q2, %q13 \n"
                    "vmla.f32 q1, q8, %q13 \n"
                    "pld [%4, #512] \n"
                    "vld2.f32 {d4-d7}, [%4]! \n"
                    "vld2.f32 {d16-d19}, [%4]! \n"
                    "vmla.f32 q0, q2, %q14 \n"
                    "vmla.f32 q1, q8, %q14 \n"
                    "pld [%5, #512] \n"
                    "vld2.f32 {d4-d7}, [%5]! \n"
                    "vld2.f32 {d16-d19}, [%5]! \n"
                    "vmla.f32 q0, q2, %q15 \n"
                    "vmla.f32 q1, q8, %q15 \n"
                    "pld [%2, #512] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vld2.f32 {d16-d19}, [%2]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d3}, [%1]! \n"
                    "bne 0b \n"
                    "sub %2, #64 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3) // %5
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k0), // %12
                    "w"(_k1), // %13
                    "w"(_k2), // %14
                    "w"(_k3) // %15
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    float sum1 = *r1 * k1;
                    float sum2 = *r2 * k2;
                    float sum3 = *r3 * k3;

                    *outptr += sum + sum1 + sum2 + sum3;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }

        // Leftover input channels for this output channel.
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];

            const float* r0 = img0;

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                    "0: \n"
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v0.4s, v1.4s}, [%1] \n"
                    "fmla v0.4s, v2.4s, %6.4s \n"
                    "fmla v1.4s, v8.4s, %6.4s \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                    "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v0.4s, v1.4s}, [%1], #32 \n"
                    "bne 0b \n"
                    "sub %2, %2, #64 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0) // %2
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "w"(_k0) // %6
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld [%2, #512] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vld2.f32 {d16-d19}, [%2]! \n"
                    "0: \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d0-d3}, [%1] \n"
                    "vmla.f32 q0, q2, %q6 \n"
                    "vmla.f32 q1, q8, %q6 \n"
                    "pld [%2, #512] \n"
                    "vld2.f32 {d4-d7}, [%2]! \n"
                    "vld2.f32 {d16-d19}, [%2]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d3}, [%1]! \n"
                    "bne 0b \n"
                    "sub %2, #64 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0) // %2
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "w"(_k0) // %6
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;

                    *outptr += sum;

                    r0 += 2;
                    outptr++;
                }

                r0 += tailstep;
            }
        }
    }
}

}
JeeIOrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #endif #include "Particle/DistanceTableData.h" #include <simd/allocator.hpp> #include <simd/algorithm.hpp> #include <map> #include <numeric> namespace qmcplusplus { /** @ingroup WaveFunctionComponent * @brief Specialization for three-body Jastrow function using multiple functors * *Each pair-type can have distinct function \f$u(r_{ij})\f$. *For electrons, distinct pair correlation functions are used *for spins up-up/down-down and up-down/down-up. 
*/ template<class FT> class JeeIOrbitalSoA : public WaveFunctionComponent { ///type of each component U, dU, d2U; using valT = typename FT::real_type; ///element position type using posT = TinyVector<valT, OHMMS_DIM>; ///use the same container using RowContainer = DistanceTableData::RowContainer; ///table index for el-el const int ee_Table_ID_; ///table index for i-el const int ei_Table_ID_; //nuber of particles int Nelec, Nion; ///number of particles + padded size_t Nelec_padded; //number of groups of the target particleset int eGroups, iGroups; ///reference to the sources (ions) const ParticleSet& Ions; ///diff value RealType DiffVal; ///\f$Uat[i] = sum_(j) u_{i,j}\f$ Vector<valT> Uat, oldUk, newUk; ///\f$dUat[i] = sum_(j) du_{i,j}\f$ using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>; gContainer_type dUat, olddUk, newdUk; ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$ Vector<valT> d2Uat, oldd2Uk, newd2Uk; /// current values during PbyP valT cur_Uat, cur_d2Uat; posT cur_dUat, dUat_temp; ///container for the Jastrow functions Array<FT*, 3> F; std::map<std::string, FT*> J3Unique; //YYYY std::map<FT*, int> J3UniqueIndex; /// the cutoff for e-I pairs std::vector<valT> Ion_cutoff; /// the electrons around ions within the cutoff radius, grouped by species Array<std::vector<int>, 2> elecs_inside; Array<std::vector<valT>, 2> elecs_inside_dist; Array<std::vector<posT>, 2> elecs_inside_displ; /// the ids of ions within the cutoff radius of an electron on which a move is proposed std::vector<int> ions_nearby_old, ions_nearby_new; /// work buffer size size_t Nbuffer; /// compressed distances aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed; std::vector<int> DistIndice_k; /// compressed displacements gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed; /// work result buffer VectorSoaContainer<valT, 9> mVGL; // Used for evaluating derivatives with respect to the parameters int NumVars; Array<std::pair<int, int>, 3> VarOffset; 
Vector<RealType> dLogPsi; Array<PosType, 2> gradLogPsi; Array<RealType, 2> lapLogPsi; // Temporary store for parameter derivatives of functor // The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that // functor std::vector<std::vector<RealType>> du_dalpha; std::vector<std::vector<PosType>> dgrad_dalpha; std::vector<std::vector<Tensor<RealType, 3>>> dhess_dalpha; public: ///alias FuncType using FuncType = FT; JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master = false) : ee_Table_ID_(elecs.addTable(elecs, DT_SOA)), ei_Table_ID_(elecs.addTable(ions, DT_SOA, true)), Ions(ions), NumVars(0) { ClassName = "JeeIOrbitalSoA"; init(elecs); } ~JeeIOrbitalSoA() {} WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const { JeeIOrbitalSoA<FT>* eeIcopy = new JeeIOrbitalSoA<FT>(Ions, elecs, false); std::map<const FT*, FT*> fcmap; for (int iG = 0; iG < iGroups; iG++) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < eGroups; eG2++) { if (F(iG, eG1, eG2) == 0) continue; typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F(iG, eG1, eG2)); if (fit == fcmap.end()) { FT* fc = new FT(*F(iG, eG1, eG2)); eeIcopy->addFunc(iG, eG1, eG2, fc); fcmap[F(iG, eG1, eG2)] = fc; } } // Ye: I don't like the following memory allocated by default. 
eeIcopy->myVars.clear(); eeIcopy->myVars.insertFrom(myVars); eeIcopy->NumVars = NumVars; eeIcopy->dLogPsi.resize(NumVars); eeIcopy->gradLogPsi.resize(NumVars, Nelec); eeIcopy->lapLogPsi.resize(NumVars, Nelec); eeIcopy->VarOffset = VarOffset; eeIcopy->Optimizable = Optimizable; return eeIcopy; } void init(ParticleSet& p) { Nelec = p.getTotalNum(); Nelec_padded = getAlignedSize<valT>(Nelec); Nion = Ions.getTotalNum(); iGroups = Ions.getSpeciesSet().getTotalNum(); eGroups = p.groups(); Uat.resize(Nelec); dUat.resize(Nelec); d2Uat.resize(Nelec); oldUk.resize(Nelec); olddUk.resize(Nelec); oldd2Uk.resize(Nelec); newUk.resize(Nelec); newdUk.resize(Nelec); newd2Uk.resize(Nelec); F.resize(iGroups, eGroups, eGroups); F = nullptr; elecs_inside.resize(eGroups, Nion); elecs_inside_dist.resize(eGroups, Nion); elecs_inside_displ.resize(eGroups, Nion); ions_nearby_old.resize(Nion); ions_nearby_new.resize(Nion); Ion_cutoff.resize(Nion, 0.0); //initialize buffers Nbuffer = Nelec; mVGL.resize(Nbuffer); Distjk_Compressed.resize(Nbuffer); DistjI_Compressed.resize(Nbuffer); DistkI_Compressed.resize(Nbuffer); Disp_jk_Compressed.resize(Nbuffer); Disp_jI_Compressed.resize(Nbuffer); Disp_kI_Compressed.resize(Nbuffer); DistIndice_k.resize(Nbuffer); } void initUnique() { typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); du_dalpha.resize(J3Unique.size()); dgrad_dalpha.resize(J3Unique.size()); dhess_dalpha.resize(J3Unique.size()); int ifunc = 0; while (it != it_end) { J3UniqueIndex[it->second] = ifunc; FT& functor = *(it->second); int numParams = functor.getNumParameters(); du_dalpha[ifunc].resize(numParams); dgrad_dalpha[ifunc].resize(numParams); dhess_dalpha[ifunc].resize(numParams); ++it; ifunc++; } } void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j) { if (eSpecies1 == eSpecies2) { //if only up-up is specified, assume spin-unpolarized correlations if (eSpecies1 == 0) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < 
eGroups; eG2++) { if (F(iSpecies, eG1, eG2) == 0) F(iSpecies, eG1, eG2) = j; } } else { F(iSpecies, eSpecies1, eSpecies2) = j; F(iSpecies, eSpecies2, eSpecies1) = j; } if (j) { RealType rcut = 0.5 * j->cutoff_radius; for (int i = 0; i < Nion; i++) if (Ions.GroupID[i] == iSpecies) Ion_cutoff[i] = rcut; } else { APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL"); } std::stringstream aname; aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2; J3Unique[aname.str()] = j; initUnique(); } /** check that correlation information is complete */ void check_complete() { //check that correlation pointers are either all 0 or all assigned bool complete = true; for (int i = 0; i < iGroups; ++i) { int nfilled = 0; bool partial; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) if (F(i, e1, e2) != 0) nfilled++; partial = nfilled > 0 && nfilled < eGroups * eGroups; if (partial) app_log() << "J3 eeI is missing correlation for ion " << i << std::endl; complete = complete && !partial; } if (!complete) { APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages " "for details"); } //first set radii for (int i = 0; i < Nion; ++i) { FT* f = F(Ions.GroupID[i], 0, 0); if (f != 0) Ion_cutoff[i] = .5 * f->cutoff_radius; } //then check radii bool all_radii_match = true; for (int i = 0; i < iGroups; ++i) { if (F(i, 0, 0) != 0) { bool radii_match = true; RealType rcut = F(i, 0, 0)->cutoff_radius; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) radii_match = radii_match && F(i, e1, e2)->cutoff_radius == rcut; if (!radii_match) app_log() << "eeI functors for ion species " << i << " have different radii" << std::endl; all_radii_match = all_radii_match && radii_match; } } if (!all_radii_match) { APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages " "for details"); } } //evaluate the distance table with els void 
resetTargetParticleSet(ParticleSet& P) {}

/** check in an optimizable parameter
 * @param active a super set of optimizable variables
 *
 * Registers every unique radial functor's parameters both with the global
 * optimizer list and with this object's local myVars.
 */
void checkInVariables(opt_variables_type& active)
{
  myVars.clear();
  typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it).second->checkInVariables(active);
    (*it).second->checkInVariables(myVars);
    ++it;
  }
}

/** check out optimizable variables
 *
 * Resolves the global indices of the local parameters and, when any are
 * active, sizes the derivative workspaces (dLogPsi, gradLogPsi, lapLogPsi)
 * and the per-(ion,elec,elec)-group parameter offset table VarOffset.
 */
void checkOutVariables(const opt_variables_type& active)
{
  myVars.clear();
  typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it).second->myVars.getIndex(active);
    myVars.insertFrom((*it).second->myVars);
    ++it;
  }
  myVars.getIndex(active);
  NumVars = myVars.size();
  if (NumVars)
  {
    dLogPsi.resize(NumVars);
    gradLogPsi.resize(NumVars, Nelec);
    lapLogPsi.resize(NumVars, Nelec);
    VarOffset.resize(iGroups, eGroups, eGroups);
    // offsets are relative to the first parameter owned by this Jastrow
    int varoffset = myVars.Index[0];
    for (int ig = 0; ig < iGroups; ig++)
      for (int jg = 0; jg < eGroups; jg++)
        for (int kg = 0; kg < eGroups; kg++)
        {
          FT* func_ijk = F(ig, jg, kg);
          if (func_ijk == nullptr)
            continue;
          VarOffset(ig, jg, kg).first  = func_ijk->myVars.Index.front() - varoffset;
          VarOffset(ig, jg, kg).second = func_ijk->myVars.Index.size() + VarOffset(ig, jg, kg).first;
        }
  }
}

///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
  if (!Optimizable)
    return;
  typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it++).second->resetParameters(active);
  }
  // mirror the active values into the local copy
  for (int i = 0; i < myVars.size(); ++i)
  {
    int ii = myVars.Index[i];
    if (ii >= 0)
      myVars[i] = active[ii];
  }
}

/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
  typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it).second->myVars.print(os);
    ++it;
  }
}

/** rebuild the per-(electron-group, ion) lists of electrons inside each ion's
 *  cutoff radius, caching their e-I distances and displacements.
 *  Must be called whenever electron positions change wholesale.
 */
void build_compact_list(ParticleSet& P)
{
  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);

  for (int iat = 0; iat < Nion; ++iat)
    for (int jg = 0; jg < eGroups; ++jg)
    {
      elecs_inside(jg, iat).clear();
      elecs_inside_dist(jg, iat).clear();
      elecs_inside_displ(jg, iat).clear();
    }

  for (int jg = 0; jg < eGroups; ++jg)
    for (int jel = P.first(jg); jel < P.last(jg); jel++)
      for (int iat = 0; iat < Nion; ++iat)
        if (eI_table.Distances[jel][iat] < Ion_cutoff[iat])
        {
          elecs_inside(jg, iat).push_back(jel);
          elecs_inside_dist(jg, iat).push_back(eI_table.Distances[jel][iat]);
          elecs_inside_displ(jg, iat).push_back(eI_table.Displacements[jel][iat]);
        }
}

/// full evaluation of log(psi) together with gradients and laplacians
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
{
  evaluateGL(P, G, L, true);
  return LogValue;
}

/// ratio psi_new/psi_old for a proposed single-particle move of iat
ValueType ratio(ParticleSet& P, int iat)
{
  UpdateMode = ORB_PBYP_RATIO;

  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
  const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
  cur_Uat =
      computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
  DiffVal = Uat[iat] - cur_Uat;
  return std::exp(DiffVal);
}

/// ratios for all virtual-particle positions of the reference particle
void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
  for (int k = 0; k < ratios.size(); ++k)
    ratios[k] = std::exp(Uat[VP.refPtcl] -
                         computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl],
                                  VP.getDistTable(ei_Table_ID_).Distances[k],
                                  VP.getDistTable(ee_Table_ID_).Distances[k], ions_nearby_old));
}

/// ratios of moving each electron of group jg to the single trial position
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
  const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

  for (int jg = 0; jg < eGroups; ++jg)
  {
    // jel = -1: no electron is excluded when summing over pairs
    const valT sumU = computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);

    for (int j = P.first(jg); j < P.last(jg); ++j)
    {
      // remove self-interaction
      valT Uself(0);
      for (int iat = 0; iat < Nion; ++iat)
      {
        const valT& r_Ij = eI_table.Temp_r[iat];
        const valT& r_Ik = eI_table.Distances[j][iat];
        if (r_Ij < Ion_cutoff[iat] && r_Ik < Ion_cutoff[iat])
        {
          const int ig = Ions.GroupID[iat];
          Uself += F(ig, jg, jg)->evaluate(ee_table.Temp_r[j], r_Ij, r_Ik);
        }
      }
      ratios[j] = std::exp(Uat[j] + Uself - sumU);
    }
  }
}

/// gradient of log(psi) w.r.t. electron iat (cached)
GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); }

/// ratio and gradient at the proposed position of electron iat
ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;

  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
  const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
  computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
            cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += cur_dUat;
  return std::exp(DiffVal);
}

inline void restore(int iat) {}

/** accept the proposed move of electron iat: update cached U/dU/d2U for all
 *  electrons and keep the compact elecs_inside lists consistent with the new
 *  position.
 */
void acceptMove(ParticleSet& P, int iat)
{
  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
  const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
  // get the old value, grad, lapl
  computeU3(P, iat, eI_table.Distances[iat], eI_table.Displacements[iat], ee_table.Distances[iat],
            ee_table.Displacements[iat], Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk,
            ions_nearby_old);
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
              cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
  }

  // apply the new-minus-old corrections to every other electron
#pragma omp simd
  for (int jel = 0; jel < Nelec; jel++)
  {
    Uat[jel] += newUk[jel] - oldUk[jel];
    d2Uat[jel] += newd2Uk[jel] - oldd2Uk[jel];
  }
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    valT* restrict save_g      = dUat.data(idim);
    const valT* restrict new_g = newdUk.data(idim);
    const valT* restrict old_g = olddUk.data(idim);
#pragma omp simd aligned(save_g, new_g, old_g)
    for (int jel = 0; jel < Nelec; jel++)
      save_g[jel] += new_g[jel] - old_g[jel];
  }

  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;

  const int ig = P.GroupID[iat];
  // update compact list elecs_inside
  // if the old position exists in elecs_inside
  for (int iind = 0; iind < ions_nearby_old.size(); iind++)
  {
    int jat = ions_nearby_old[iind];
    auto iter = std::find(elecs_inside(ig, jat).begin(), elecs_inside(ig, jat).end(), iat);
    auto iter_dist = elecs_inside_dist(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
    auto iter_displ = elecs_inside_displ(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
    if (eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside
    {
      *iter_dist  = eI_table.Temp_r[jat];
      *iter_displ = eI_table.Temp_dr[jat];
      // mark this ion handled so the insertion loop below skips it
      *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1;
    }
    else
    {
      // swap-and-pop removal from all three parallel lists
      *iter = elecs_inside(ig, jat).back();
      elecs_inside(ig, jat).pop_back();
      *iter_dist = elecs_inside_dist(ig, jat).back();
      elecs_inside_dist(ig, jat).pop_back();
      *iter_displ = elecs_inside_displ(ig, jat).back();
      elecs_inside_displ(ig, jat).pop_back();
    }
  }

  // if the old position doesn't exist in elecs_inside but the new position do
  for (int iind = 0; iind < ions_nearby_new.size(); iind++)
  {
    int jat = ions_nearby_new[iind];
    if (jat >= 0)
    {
      elecs_inside(ig, jat).push_back(iat);
      elecs_inside_dist(ig, jat).push_back(eI_table.Temp_r[jat]);
      elecs_inside_displ(ig, jat).push_back(eI_table.Temp_dr[jat]);
    }
  }
}

/// recompute all cached quantities from scratch (triangle accumulation)
inline void recompute(ParticleSet& P)
{
  const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
  const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

  build_compact_list(P);

  for (int jel = 0; jel < Nelec; ++jel)
  {
    computeU3(P, jel, eI_table.Distances[jel], eI_table.Displacements[jel], ee_table.Distances[jel],
              ee_table.Displacements[jel], Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk,
              ions_nearby_new, true);
    dUat(jel) = dUat_temp;
    // add the contribution from the upper triangle
#pragma omp simd
    for (int kel = 0; kel < jel; kel++)
    {
      Uat[kel] += newUk[kel];
      d2Uat[kel] += newd2Uk[kel];
    }
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      valT* restrict save_g      = dUat.data(idim);
      const valT* restrict new_g = newdUk.data(idim);
#pragma omp simd aligned(save_g, new_g)
      for (int kel = 0; kel < jel; kel++)
        save_g[kel] += new_g[kel];
    }
  }
}

/** sum of three-body terms U(r_jk, r_jI, r_kI) involving electron jel (group
 *  jg), batching same-functor triplets into compressed buffers of at most
 *  Nbuffer entries before each vectorized evaluateV call.
 *  @param jel electron to exclude from pairing (-1 excludes none)
 */
inline valT computeU(const ParticleSet& P, int jel, int jg, const RealType* distjI, const RealType* distjk,
                     std::vector<int>& ions_nearby)
{
  ions_nearby.clear();
  for (int iat = 0; iat < Nion; ++iat)
    if (distjI[iat] < Ion_cutoff[iat])
      ions_nearby.push_back(iat);

  valT Uj = valT(0);
  for (int kg = 0; kg < eGroups; ++kg)
  {
    int kel_counter = 0;
    for (int iind = 0; iind < ions_nearby.size(); ++iind)
    {
      const int iat  = ions_nearby[iind];
      const int ig   = Ions.GroupID[iat];
      const valT r_jI = distjI[iat];
      for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
      {
        const int kel = elecs_inside(kg, iat)[kind];
        if (kel != jel)
        {
          DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind];
          Distjk_Compressed[kel_counter] = distjk[kel];
          DistjI_Compressed[kel_counter] = r_jI;
          kel_counter++;
          if (kel_counter == Nbuffer)
          {
            // buffer full: flush through the shared functor
            const FT& feeI(*F(ig, jg, kg));
            Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(),
                                 DistkI_Compressed.data());
            kel_counter = 0;
          }
        }
      }
      // flush when the next ion belongs to a different ion group (different functor)
      if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0)
      {
        const FT& feeI(*F(ig, jg, kg));
        Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(),
                             DistkI_Compressed.data());
        kel_counter = 0;
      }
    }
  }
  return Uj;
}

/** inner kernel of computeU3: evaluate VGL for a compressed batch and fold
 *  values, gradients and laplacians into the accumulators of electron jel
 *  (Uj, dUj, d2Uj) and of its partners (Uk, dUk, d2Uk).
 *  NOTE(review): several mVGL/displacement buffers are recycled/overwritten
 *  in place (see the "recycle"/"destroy" comments) — statement order is
 *  load-bearing here.
 */
inline void computeU3_engine(const ParticleSet& P, const FT& feeI, int kel_counter, valT& Uj, posT& dUj,
                             valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk)
{
  constexpr valT czero(0);
  constexpr valT cone(1);
  constexpr valT ctwo(2);
  constexpr valT lapfac = OHMMS_DIM - cone;

  valT* restrict val     = mVGL.data(0);
  valT* restrict gradF0  = mVGL.data(1);
  valT* restrict gradF1  = mVGL.data(2);
  valT* restrict gradF2  = mVGL.data(3);
  valT* restrict hessF00 = mVGL.data(4);
  valT* restrict hessF11 = mVGL.data(5);
  valT* restrict hessF22 = mVGL.data(6);
  valT* restrict hessF01 = mVGL.data(7);
  valT* restrict hessF02 = mVGL.data(8);

  feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(),
                   val, gradF0, gradF1, gradF2, hessF00, hessF11, hessF22, hessF01, hessF02);

  // compute the contribution to jel, kel
  Uj = simd::accumulate_n(val, kel_counter, Uj);

  valT gradF0_sum  = simd::accumulate_n(gradF0, kel_counter, czero);
  valT gradF1_sum  = simd::accumulate_n(gradF1, kel_counter, czero);
  valT hessF00_sum = simd::accumulate_n(hessF00, kel_counter, czero);
  valT hessF11_sum = simd::accumulate_n(hessF11, kel_counter, czero);
  d2Uj -= hessF00_sum + hessF11_sum + lapfac * (gradF0_sum + gradF1_sum);
  std::fill_n(hessF11, kel_counter, czero);

  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    valT* restrict jk = Disp_jk_Compressed.data(idim);
    valT* restrict jI = Disp_jI_Compressed.data(idim);
    valT* restrict kI = Disp_kI_Compressed.data(idim);
    valT dUj_x(0);
#pragma omp simd aligned(gradF0, gradF1, gradF2, hessF11, jk, jI, kI) reduction(+ : dUj_x)
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    {
      // recycle hessF11
      hessF11[kel_index] += kI[kel_index] * jk[kel_index];
      dUj_x += gradF1[kel_index] * jI[kel_index];
      // destroy jk, kI
      const valT temp = jk[kel_index] * gradF0[kel_index];
      dUj_x += temp;
      jk[kel_index] *= jI[kel_index];
      kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp;
    }
    dUj[idim] += dUj_x;

    valT* restrict jk0 = Disp_jk_Compressed.data(0);
    if (idim > 0)
    {
#pragma omp simd aligned(jk, jk0)
      for (int kel_index = 0; kel_index < kel_counter; kel_index++)
        jk0[kel_index] += jk[kel_index];
    }

    valT* restrict dUk_x = dUk.data(idim);
    // scatter back to the partner electrons (not vectorizable: indirect index)
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
      dUk_x[DistIndice_k[kel_index]] += kI[kel_index];
  }
  valT sum(0);
  valT* restrict jk0 = Disp_jk_Compressed.data(0);
#pragma omp simd aligned(jk0, hessF01) reduction(+ : sum)
  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    sum += hessF01[kel_index] * jk0[kel_index];
  d2Uj -= ctwo * sum;

  // hessF00 is recycled below as the partner-electron laplacian contribution
#pragma omp simd aligned(hessF00, hessF22, gradF0, gradF2, hessF02, hessF11)
  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] +
        lapfac * (gradF0[kel_index] + gradF2[kel_index]) -
        ctwo * hessF02[kel_index] * hessF11[kel_index];

  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
  {
    const int kel = DistIndice_k[kel_index];
    Uk[kel] += val[kel_index];
    d2Uk[kel] -= hessF00[kel_index];
  }
}

/** compute U, dU and d2U of electron jel together with the corrections to
 *  its partner electrons, batching triplets exactly as computeU does.
 *  @param triangle when true restrict partners to kel < jel (used by recompute)
 */
inline void computeU3(const ParticleSet& P, int jel, const RealType* distjI, const RowContainer& displjI,
                      const RealType* distjk, const RowContainer& displjk, valT& Uj, posT& dUj, valT& d2Uj,
                      Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk,
                      std::vector<int>& ions_nearby, bool triangle = false)
{
  constexpr valT czero(0);

  Uj   = czero;
  dUj  = posT();
  d2Uj = czero;

  const int jg = P.GroupID[jel];

  const int kelmax = triangle ? jel : Nelec;
  std::fill_n(Uk.data(), kelmax, czero);
  std::fill_n(d2Uk.data(), kelmax, czero);
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
    std::fill_n(dUk.data(idim), kelmax, czero);

  ions_nearby.clear();
  for (int iat = 0; iat < Nion; ++iat)
    if (distjI[iat] < Ion_cutoff[iat])
      ions_nearby.push_back(iat);

  for (int kg = 0; kg < eGroups; ++kg)
  {
    int kel_counter = 0;
    for (int iind = 0; iind < ions_nearby.size(); ++iind)
    {
      const int iat      = ions_nearby[iind];
      const int ig       = Ions.GroupID[iat];
      const valT r_jI    = distjI[iat];
      const posT disp_Ij = displjI[iat];
      for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
      {
        const int kel = elecs_inside(kg, iat)[kind];
        if (kel < kelmax && kel != jel)
        {
          DistkI_Compressed[kel_counter]  = elecs_inside_dist(kg, iat)[kind];
          DistjI_Compressed[kel_counter]  = r_jI;
          Distjk_Compressed[kel_counter]  = distjk[kel];
          Disp_kI_Compressed(kel_counter) = elecs_inside_displ(kg, iat)[kind];
          Disp_jI_Compressed(kel_counter) = disp_Ij;
          Disp_jk_Compressed(kel_counter) = displjk[kel];
          DistIndice_k[kel_counter]       = kel;
          kel_counter++;
          if (kel_counter == Nbuffer)
          {
            const FT& feeI(*F(ig, jg, kg));
            computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
            kel_counter = 0;
          }
        }
      }
      // flush at ion-group boundary (functor changes with ion group)
      if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0)
      {
        const FT& feeI(*F(ig, jg, kg));
        computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
        kel_counter = 0;
      }
    }
  }
}

/// register cached arrays in the walker buffer; on first call the local
/// storage is handed over to the buffer and freed locally
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
  if (Bytes_in_WFBuffer == 0)
  {
    Bytes_in_WFBuffer = buf.current();
    buf.add(Uat.begin(), Uat.end());
    buf.add(dUat.data(), dUat.end());
    buf.add(d2Uat.begin(), d2Uat.end());
    Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
    // free local space
    Uat.free();
    dUat.free();
    d2Uat.free();
  }
  else
  {
    buf.forward(Bytes_in_WFBuffer);
  }
}

/// refresh G/L contributions and advance the buffer cursor
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
  evaluateGL(P, P.G, P.L, false);
  buf.forward(Bytes_in_WFBuffer);
  return LogValue;
}

/// re-attach the cached arrays to the walker buffer and rebuild neighbor lists
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
  Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded * OHMMS_DIM));
  d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  build_compact_list(P);
}

/// accumulate gradient and laplacian into G/L and refresh LogValue
void evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L,
                bool fromscratch = false)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < Nelec; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  // each pair term is accumulated twice (once per electron), hence the 0.5
  LogValue = -LogValue * 0.5;
}

/// derivatives of log(psi) and of the local energy w.r.t. the optimizable
/// parameters of the unique functors
void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi,
                         std::vector<ValueType>& dhpsioverpsi)
{
  bool recalculate(false);
  std::vector<bool> rcsingles(myVars.size(), false);
  for (int k = 0; k < myVars.size(); ++k)
  {
    int kk = myVars.where(k);
    if (kk < 0)
      continue;
    if (optvars.recompute(kk))
      recalculate = true;
    // NOTE(review): set for every mapped parameter, not only the ones flagged
    // for recompute — confirm this matches the optimizer's expectation
    rcsingles[k] = true;
  }

  if (recalculate)
  {
    constexpr valT czero(0);
    constexpr valT cone(1);
    constexpr valT cminus(-1);
    constexpr valT ctwo(2);
    constexpr valT lapfac = OHMMS_DIM - cone;

    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

    build_compact_list(P);

    dLogPsi    = czero;
    gradLogPsi = PosType();
    lapLogPsi  = czero;

    for (int iat = 0; iat < Nion; ++iat)
    {
      const int ig = Ions.GroupID[iat];
      for (int jg = 0; jg < eGroups; ++jg)
        for (int jind = 0; jind < elecs_inside(jg, iat).size(); jind++)
        {
          const int jel       = elecs_inside(jg, iat)[jind];
          const valT r_Ij     = elecs_inside_dist(jg, iat)[jind];
          // displacements are stored ion->electron; flip to electron->ion
          const posT disp_Ij  = cminus * elecs_inside_displ(jg, iat)[jind];
          const valT r_Ij_inv = cone / r_Ij;

          for (int kg = 0; kg < eGroups; ++kg)
            for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
            {
              const int kel = elecs_inside(kg, iat)[kind];
              if (kel < jel)
              {
                const valT r_Ik     = elecs_inside_dist(kg, iat)[kind];
                const posT disp_Ik  = cminus * elecs_inside_displ(kg, iat)[kind];
                const valT r_Ik_inv = cone / r_Ik;

                const valT r_jk     = ee_table.Distances[jel][kel];
                const posT disp_jk  = ee_table.Displacements[jel][kel];
                const valT r_jk_inv = cone / r_jk;

                FT& func = *F(ig, jg, kg);
                int idx  = J3UniqueIndex[F(ig, jg, kg)];
                func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx],
                                         dhess_dalpha[idx]);
                int first                            = VarOffset(ig, jg, kg).first;
                int last                             = VarOffset(ig, jg, kg).second;
                std::vector<RealType>& dlog          = du_dalpha[idx];
                std::vector<PosType>& dgrad          = dgrad_dalpha[idx];
                std::vector<Tensor<RealType, 3>>& dhess = dhess_dalpha[idx];

                for (int p = first, ip = 0; p < last; p++, ip++)
                {
                  RealType& dval          = dlog[ip];
                  PosType& dg             = dgrad[ip];
                  Tensor<RealType, 3>& dh = dhess[ip];

                  // convert d/dr into cartesian components via unit vectors
                  dg[0] *= r_jk_inv;
                  dg[1] *= r_Ij_inv;
                  dg[2] *= r_Ik_inv;

                  PosType gr_ee = dg[0] * disp_jk;

                  gradLogPsi(p, jel) -= dg[1] * disp_Ij - gr_ee;
                  lapLogPsi(p, jel) -=
                      (dh(0, 0) + lapfac * dg[0] -
                       ctwo * dh(0, 1) * dot(disp_jk, disp_Ij) * r_jk_inv * r_Ij_inv + dh(1, 1) +
                       lapfac * dg[1]);

                  gradLogPsi(p, kel) -= dg[2] * disp_Ik + gr_ee;
                  lapLogPsi(p, kel) -=
                      (dh(0, 0) + lapfac * dg[0] +
                       ctwo * dh(0, 2) * dot(disp_jk, disp_Ik) * r_jk_inv * r_Ik_inv + dh(2, 2) +
                       lapfac * dg[2]);

                  dLogPsi[p] -= dval;
                }
              }
            }
        }
    }

    for (int k = 0; k < myVars.size(); ++k)
    {
      int kk = myVars.where(k);
      if (kk < 0)
        continue;
      dlogpsi[kk]  = (ValueType)dLogPsi[k];
      RealType sum = 0.0;
      for (int i = 0; i < Nelec; i++)
      {
#if defined(QMC_COMPLEX)
        sum -= 0.5 * lapLogPsi(k, i);
        for (int jdim = 0; jdim < OHMMS_DIM; ++jdim)
          sum -= P.G[i][jdim].real() * gradLogPsi(k, i)[jdim];
#else
        sum -= 0.5 * lapLogPsi(k, i) + dot(P.G[i], gradLogPsi(k, i));
#endif
      }
      dhpsioverpsi[kk] = (ValueType)sum;
    }
  }
}
};

} // namespace qmcplusplus
#endif
task-dependency.c
/* * task-dependency.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include "ompt/ompt-signal.h" #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) depend(out : var) { OMPT_SIGNAL(a); var++; } #pragma omp task shared(a) depend(in : var) { OMPT_SIGNAL(a); OMPT_WAIT(a, 3); } #pragma omp task shared(var) // depend(in: var) is missing here! { var++; OMPT_SIGNAL(a); } // Give other thread time to steal the task. OMPT_WAIT(a, 2); } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-dependency.c:41 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-dependency.c:30 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
opencl_zip_fmt_plug.c
/*
 * This software is Copyright (c) 2012 Dhiru Kholia <dhiru at openwall.com>
 * with some code (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and improvements (c) 2014 by magnum and JimF.
 *
 * This is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* John the Ripper format plugin: WinZip AES (PBKDF2-HMAC-SHA1) on OpenCL. */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_zip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_zip);
#else

#include <string.h>
#include <stdint.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "pkzip.h"
#include "dyna_salt.h"
#include "hmac_sha.h"
#include "options.h"
#define OPENCL_FORMAT 1
#include "pbkdf2_hmac_sha1.h"

#define FORMAT_LABEL "zip-opencl"
#define FORMAT_NAME "ZIP"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL"
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SWAP(n) \
	(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))

#define BINARY_ALIGN sizeof(uint32_t)
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(my_salt*)
#define SALT_ALIGN sizeof(size_t)

/* One candidate password as transferred to the GPU. */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} zip_password;

/* Derived-key output slot per candidate (2 AES keys + password verifier). */
typedef struct {
	uint32_t v[(2 * KEY_LENGTH(3) + PWD_VER_LENGTH + 3) / 4];
} zip_hash;

/* PBKDF2 parameters shared by all candidates for the current salt. */
typedef struct {
	uint32_t iterations;
	uint32_t outlen;
	uint32_t skip_bytes;
	uint8_t  length;
	uint8_t  salt[64];
} zip_salt;

/* Dynamic-sized salt record; datablob[] is the trailing encrypted data. */
typedef struct my_salt_t {
	dyna_salt dsalt;
	uint32_t comp_len;
	struct {
		uint16_t type : 4;
		uint16_t mode : 4;
	} v;
	unsigned char passverify[2];
	unsigned char salt[SALT_LENGTH(3)];
	//uint64_t data_key; // MSB of md5(data blob). We lookup using this.
	unsigned char datablob[1];
} my_salt;

static my_salt *saved_salt;
static unsigned char (*crypt_key)[((WINZIP_BINARY_SIZE + 4)/4)*4]; // ensure 32-bit alignment
static cl_int cl_error;
static zip_password *inbuffer;
static zip_hash *outbuffer;
static zip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;

static size_t insize, outsize, settingsize;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/* Allocate host buffers and device memory for gws keys, and bind kernel args. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(zip_password) * gws;
	outsize = sizeof(zip_hash) * gws;
	settingsize = sizeof(zip_salt);

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);

	crypt_key = mem_calloc(gws, sizeof(*crypt_key));

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
}

/* Release device memory and host buffers; crypt_key doubles as the "allocated" flag. */
static void release_clobj(void)
{
	if (crypt_key) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
		MEM_FREE(crypt_key);
		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
	}
}

static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}

static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the kernel and run auto-tuning the first time the format is used. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(zip_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/* Parse a "$zip2$*..." ciphertext into a heap-allocated dyna_salt record and
 * return (via a static pointer slot) a pointer to that record. On a bad or
 * unreadable ZFILE reference, v.type is set to 1 so crypt_all skips the salt. */
static void *get_salt(char *ciphertext)
{
	int i;
	my_salt salt, *psalt;
	static unsigned char *ptr;
	/* extract data from "ciphertext" */
	c8 *copy_mem = strdup(ciphertext);
	c8 *cp, *p;

	if (!ptr)
		ptr = mem_alloc_tiny(sizeof(my_salt*),sizeof(my_salt*));
	p = copy_mem + WINZIP_TAG_LENGTH+1; /* skip over "$zip2$*" */
	memset(&salt, 0, sizeof(salt));
	cp = strtokm(p, "*"); // type
	salt.v.type = atoi((const char*)cp);
	cp = strtokm(NULL, "*"); // mode
	salt.v.mode = atoi((const char*)cp);
	cp = strtokm(NULL, "*"); // file_magic enum (ignored)
	cp = strtokm(NULL, "*"); // salt
	for (i = 0; i < SALT_LENGTH(salt.v.mode); i++)
		salt.salt[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
	cp = strtokm(NULL, "*"); // validator
	salt.passverify[0] = (atoi16[ARCH_INDEX(cp[0])]<<4) | atoi16[ARCH_INDEX(cp[1])];
	salt.passverify[1] = (atoi16[ARCH_INDEX(cp[2])]<<4) | atoi16[ARCH_INDEX(cp[3])];
	cp = strtokm(NULL, "*"); // data len
	sscanf((const char *)cp, "%x", &salt.comp_len);

	// later we will store the data blob in our own static data structure, and place the 64 bit LSB of the
	// MD5 of the data blob into a field in the salt. For the first POC I store the entire blob and just
	// make sure all my test data is small enough to fit.

	cp = strtokm(NULL, "*"); // data blob

	// Ok, now create the allocated salt record we are going to return back to John, using the dynamic
	// sized data buffer.
	psalt = (my_salt*)mem_calloc(1, sizeof(my_salt) + salt.comp_len);
	psalt->v.type = salt.v.type;
	psalt->v.mode = salt.v.mode;
	psalt->comp_len = salt.comp_len;
	psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.
	memcpy(psalt->salt, salt.salt, sizeof(salt.salt));
	psalt->passverify[0] = salt.passverify[0];
	psalt->passverify[1] = salt.passverify[1];

	// set the JtR core linkage stuff for this dyna_salt
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(my_salt, comp_len);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(my_salt, comp_len, datablob, psalt->comp_len);

	if (strcmp((const char*)cp, "ZFILE")) {
		/* inline hex blob */
		for (i = 0; i < psalt->comp_len; i++)
			psalt->datablob[i] = (atoi16[ARCH_INDEX(cp[i<<1])]<<4) | atoi16[ARCH_INDEX(cp[(i<<1)+1])];
	} else {
		/* blob lives in the original zip file: ZFILE*name*header_off*data_off */
		c8 *Fn, *Oh, *Ob;
		long len;
		uint32_t id;
		FILE *fp;

		Fn = strtokm(NULL, "*");
		Oh = strtokm(NULL, "*");
		Ob = strtokm(NULL, "*");

		fp = fopen((const char*)Fn, "rb");
		if (!fp) {
			psalt->v.type = 1; // this will tell the format to 'skip' this salt, it is garbage
			goto Bail;
		}
		sscanf((const char*)Oh, "%lx", &len);
		if (fseek(fp, len, SEEK_SET)) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		/* sanity check: local file header signature "PK\3\4" */
		id = fget32LE(fp);
		if (id != 0x04034b50U) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		sscanf((const char*)Ob, "%lx", &len);
		if (fseek(fp, len, SEEK_SET)) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		if (fread(psalt->datablob, 1, psalt->comp_len, fp) != psalt->comp_len) {
			fclose(fp);
			psalt->v.type = 1;
			goto Bail;
		}
		fclose(fp);
	}
Bail:;
	MEM_FREE(copy_mem);
	memcpy(ptr, &psalt, sizeof(my_salt*));
	return (void*)ptr;
}

/* Copy the current salt's PBKDF2 parameters to the device. */
static void set_salt(void *salt)
{
	saved_salt = *((my_salt**)salt);
	memcpy((char*)currentsalt.salt, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode));
	currentsalt.length = SALT_LENGTH(saved_salt->v.mode);
	currentsalt.iterations = KEYING_ITERATIONS;
	currentsalt.outlen = PWD_VER_LENGTH;
	currentsalt.skip_bytes = 2 * KEY_LENGTH(saved_salt->v.mode);

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy setting to gpu");
}

#undef set_key
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/* GPU pass computes only the cheap 2-byte password verifier; candidates that
 * match it are re-derived on CPU and HMAC'd over the data blob. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (saved_salt->v.type) {
		// This salt passed valid() but failed get_salt().
		// Should never happen.
		memset(crypt_key, 0, count * WINZIP_BINARY_SIZE);
		return count;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		if (!memcmp((unsigned char*)outbuffer[index].v, saved_salt->passverify, 2)) {
			unsigned char pwd_ver[4+64];

			pbkdf2_sha1(inbuffer[index].v, inbuffer[index].length, saved_salt->salt, SALT_LENGTH(saved_salt->v.mode), KEYING_ITERATIONS, pwd_ver, KEY_LENGTH(saved_salt->v.mode), KEY_LENGTH(saved_salt->v.mode));
			hmac_sha1(pwd_ver, KEY_LENGTH(saved_salt->v.mode), (const unsigned char*)saved_salt->datablob, saved_salt->comp_len, crypt_key[index], WINZIP_BINARY_SIZE);
		} else
			memset(crypt_key[index], 0, WINZIP_BINARY_SIZE);
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (((uint32_t*)&(crypt_key[i]))[0] == ((uint32_t*)binary)[0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return (((uint32_t*)&(crypt_key[index]))[0] == ((uint32_t*)binary)[0]);
}

static int cmp_exact(char *source, int index)
{
	void *b = winzip_common_binary(source);
	return !memcmp(b, crypt_key[index], sizeof(crypt_key[index]));
}

struct fmt_main fmt_opencl_zip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		WINZIP_BENCHMARK_COMMENT,
		WINZIP_BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		WINZIP_BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{ NULL },
		{ WINZIP_FORMAT_TAG },
		winzip_common_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		winzip_common_valid,
		winzip_common_split,
		winzip_common_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_dyna_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_OPENCL */
GB_unaryop__lnot_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_uint64_uint64
// op(A') function: GB_tran__lnot_uint64_uint64

// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: result is 1 when aij is zero, else 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries; entries are independent, so the loop
// is statically parallelized over nthreads.
GrB_Info GB_unop__lnot_uint64_uint64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is shared code; the macros above specialize the
// included template for this type/operator combination.
GrB_Info GB_tran__lnot_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__minv_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_uint32_uint32 // op(A') function: GB_unop_tran__minv_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CAST(z, aij) \ uint32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_uint32_uint32 ( uint32_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int 
nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint32_t aij = Ax [p] ; uint32_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 32) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
arrsched.h
#pragma omp parallel for collapse(2) for (long tk = GZ; tk < N + GZ; tk += TILEK) for (long tj = GZ; tj < N + GZ; tj += TILEJ) for (long ti = GZ; ti < N + GZ; ti += TILEI) for (long k = tk; k < tk + TILEK; ++k) for (long j = tj; j < tj + TILEJ; ++j) #pragma omp simd for (long i = ti; i < ti + TILEI; ++i)
collective_alltoall.c
/*****************************************************************************
 *                                                                           *
 *       Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0            *
 *                                                                           *
 *                          produced by                                      *
 *                                                                           *
 *              Mark Bull, Jim Enright and Fiona Reid                        *
 *                                                                           *
 *                              at                                           *
 *                                                                           *
 *              Edinburgh Parallel Computing Centre                          *
 *                                                                           *
 *        email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk                    *
 *                                                                           *
 *                                                                           *
 *            Copyright 2012, The University of Edinburgh                    *
 *                                                                           *
 *                                                                           *
 * Licensed under the Apache License, Version 2.0 (the "License");           *
 * you may not use this file except in compliance with the License.          *
 * You may obtain a copy of the License at                                   *
 *                                                                           *
 *     http://www.apache.org/licenses/LICENSE-2.0                            *
 *                                                                           *
 * Unless required by applicable law or agreed to in writing, software       *
 * distributed under the License is distributed on an "AS IS" BASIS,         *
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  *
 * See the License for the specific language governing permissions and       *
 * limitations under the License.                                            *
 *                                                                           *
 ****************************************************************************/

/*-----------------------------------------------------------*/
/* Implements the alltoall mixed mode OpenMP/MPI benchmark.  */
/*                                                           */
/* NOTE(review): this file reads several externals defined   */
/* elsewhere in the suite (myMPIRank, numMPIprocs,           */
/* numThreads, comm, repsToDo, benchComplete, timing vars,   */
/* the buffers, myThreadID and globalIDarray).  The OpenMP   */
/* regions below use default(none) yet reference myThreadID  */
/* without listing it — presumably it is declared            */
/* threadprivate in a shared header; confirm before          */
/* modifying the data-sharing clauses.                       */
/*-----------------------------------------------------------*/
#include "collective_alltoall.h"

int freeAlltoallData();

/*-----------------------------------------------------------*/
/* alltoall                                                  */
/*                                                           */
/* Driver routine for the alltoall benchmark.  Doubles the   */
/* message size each iteration, re-timing the kernel until   */
/* the target run time is reached for each size.             */
/*-----------------------------------------------------------*/
int alltoall() {
  int dataSizeIter;
  int bufferSize;

  /* Initialise repsToDo to defaultReps */
  repsToDo = defaultReps;

  /* Start loop over data sizes */
  dataSizeIter = minDataSize; /* initialise dataSizeIter */

  while (dataSizeIter <= maxDataSize) {
    /* Calculate bufferSize and allocate space for
     * the data arrays.
     * Each process sends dataSizeIter items per (sender thread,
     * receiver thread) pair to every process:
     * dataSizeIter * numThreads^2 * numMPIprocs ints in total. */
    bufferSize = dataSizeIter * numThreads * numMPIprocs * numThreads;
    allocateAlltoallData(bufferSize);

    /* Perform warm-up of benchmark */
    alltoallKernel(warmUpIters, dataSizeIter);

    /* Test if alltoall was successful */
    testAlltoall(dataSizeIter);

    /* Initialise the benchmark */
    benchComplete = FALSE;

    /* Execute benchmark until target time is reached */
    while (benchComplete != TRUE) {
      /* Start timer */
      MPI_Barrier(comm);
      startTime = MPI_Wtime();

      /* Execute alltoall for repsToDo repetitions */
      alltoallKernel(repsToDo, dataSizeIter);

      /* Stop timer */
      MPI_Barrier(comm);
      finishTime = MPI_Wtime();
      totalTime = finishTime - startTime;

      /* Test if target time was reached; only rank 0 decides,
       * then the decision and the (possibly updated) rep count
       * are broadcast so all ranks stay in lock step. */
      if (myMPIRank == 0) {
        benchComplete = repTimeCheck(totalTime, repsToDo);
      }
      /* Ensure all procs have the same value of benchComplete */
      /* and repsToDo */
      MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
      MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
    }

    /* Master process sets benchmark result for reporting */
    if (myMPIRank == 0) {
      setReportParams(dataSizeIter, repsToDo, totalTime);
      printReport();
    }

    /* Free allocated data */
    freeAlltoallData();

    /* Double data size and loop again */
    dataSizeIter = dataSizeIter * 2;
  }

  return 0;
}

/*-----------------------------------------------------------*/
/* alltoallKernel                                            */
/*                                                           */
/* Implements the all to all benchmark.                      */
/* Each thread sends/receives dataSize items to/from         */
/* every other process.                                      */
/*-----------------------------------------------------------*/
int alltoallKernel(int totalReps, int dataSize) {
  int repIter, i, j;
  int dataForEachProc, numsToWrite;
  int blockNum, startOffset;

  /* Calculate how much data each thread sends to each process */
  numsToWrite = numThreads * dataSize;

  /* Calculate total amount of data each process receives
   * from any other process....
   * ...each thread gets dataSize items from every other thread.
   */
  dataForEachProc = numThreads * numThreads * dataSize;

  for (repIter = 0; repIter < totalReps; repIter++) {
    /* Each thread writes numsToWrite items for each
     * MPI process to alltoallSendBuf.
     */
#pragma omp parallel default(none) private(blockNum, i, j) \
    shared(numsToWrite, dataForEachProc, globalIDarray) \
    shared(alltoallSendBuf, numMPIprocs)
    {
      /* Calculate the blockNum of each thread.
       * This is used to find which portion of the
       * dataForEachProc elements a thread will
       * be responsible for.
       */
      blockNum = (myThreadID)*numsToWrite;

      /* Write threadID to correct location in
       * alltoallSendBuf.
       */
      for (i = 0; i < numMPIprocs; i++) { /* loop over MPI processes */
        for (j = 0; j < numsToWrite; j++) { /* loop over data to write */
          alltoallSendBuf[blockNum + (i * dataForEachProc) + j] =
              globalIDarray[myThreadID];
        }
      }
    }

    /* Call MPI_AlltoAll */
    MPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INT,
                 alltoallRecvBuf, dataForEachProc, MPI_INT, comm);

    /* Each thread now reads the receive buffer so that it gets
     * dataSize values from every other thread in its portion
     * of alltoallFinalBuf.
     */
#pragma omp parallel default(none) private(blockNum, startOffset, i, j) \
    shared(alltoallRecvBuf, alltoallFinalBuf, numMPIprocs) \
    shared(dataForEachProc, numsToWrite, dataSize, globalIDarray) \
    shared(numThreads)
    {
      /* Calculate the blockNum.
       * This identifies which portion of the data from each
       * process a thread is responsible for.
       */
      blockNum = myThreadID * dataSize;

      /* Calculate the offset into each MPI processes finalBuf
       * where each thread will start to read its data.
       */
      startOffset = (numsToWrite * numMPIprocs) * myThreadID;

      /* Loop over all processors (threads & proceeses).
       */
      for (i = 0; i < (numThreads * numMPIprocs); i++) {
        for (j = 0; j < dataSize; j++) {
          alltoallFinalBuf[startOffset + (i * dataSize) + j] =
              alltoallRecvBuf[blockNum + (i * numsToWrite) + j];
        }
      }
    }
  }

  return 0;
}

/*-----------------------------------------------------------*/
/* allocateAlltoallData                                      */
/*                                                           */
/* Allocates memory for the main data arrays used in the     */
/* alltoall benchmark.                                       */
/*                                                           */
/* NOTE(review): malloc results are not checked; a failed    */
/* allocation would crash later in the kernel.               */
/*-----------------------------------------------------------*/
int allocateAlltoallData(int bufferSize) {
  alltoallSendBuf = (int *)malloc(bufferSize * sizeof(int));
  alltoallRecvBuf = (int *)malloc(bufferSize * sizeof(int));
  alltoallFinalBuf = (int *)malloc(bufferSize * sizeof(int));

  return 0;
}

/*-----------------------------------------------------------*/
/* freeAlltoallData                                          */
/*                                                           */
/* Free memory of the main data arrays.                      */
/*-----------------------------------------------------------*/
int freeAlltoallData() {
  free(alltoallSendBuf);
  free(alltoallRecvBuf);
  free(alltoallFinalBuf);

  return 0;
}

/*-----------------------------------------------------------*/
/* testAlltoall                                              */
/*                                                           */
/* Verifies that the all to all completed successfully:      */
/* after the gather phase, position i of each thread's       */
/* portion of alltoallFinalBuf should hold the global ID of  */
/* processor i (presumably globalIDarray maps thread t on    */
/* rank r to r*numThreads + t — confirm in the suite's       */
/* initialisation code).                                     */
/*-----------------------------------------------------------*/
int testAlltoall(int dataSize) {
  int sizeofBuffer, i, j;
  int dataForEachThread, startElem;
  int testFlag, reduceFlag;
  int *testBuf;

  /* Set testFlag to true */
  testFlag = TRUE;

  /* calculate the size of the buffer on each process and allocate */
  sizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads;
  testBuf = (int *)malloc(sizeofBuffer * sizeof(int));

  /* Calculate how many elements each thread will work with */
  dataForEachThread = dataSize * numThreads * numMPIprocs;

  /* Fill buffer with expected values.
   */
#pragma omp parallel default(none) private(i, j, startElem) \
    shared(testBuf, globalIDarray, sizeofBuffer, dataSize) \
    shared(numThreads, numMPIprocs, dataForEachThread)
  {
    /* Calculate start element for each thread */
    startElem = (myThreadID)*dataForEachThread;

    for (i = 0; i < (numThreads * numMPIprocs); i++) {
      for (j = 0; j < dataSize; j++) {
        testBuf[startElem + (i * dataSize) + j] = i;
      }
    }
  }

  /* Compare */
  for (i = 0; i < sizeofBuffer; i++) {
    if (alltoallFinalBuf[i] != testBuf[i]) {
      testFlag = FALSE;
    }
  }

  /* Reduce testFlag with logical AND operator to
   * get overall test result.
   */
  MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);

  /* Master then sets testOutcome flag */
  if (myMPIRank == 0) {
    setTestOutcome(reduceFlag);
  }

  /* Free space for testBuf */
  free(testBuf);

  return 0;
}
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 *
 * NOTE(review): the "#pragma scop"/"#pragma endscop" region and the
 * "$reset_tile_sizes" marker below are consumed by a source-to-source
 * transformer (PLUTO-style); keep their exact placement when editing.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified (carry normalisation) as a side effect. */
int timeval_subtract(struct timeval *result, struct timeval *x,
                     struct timeval *y) {
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative.
   */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates double-buffered grid A and 13 coefficient
 * arrays, runs Nt timesteps of the 25-point stencil TESTS times, and
 * reports the minimum wall-clock time plus a checksum of the final grid. */
int main(int argc, char *argv[]) {
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* +8 adds a 4-cell halo on both ends of each dimension (stencil radius 4).
   * NOTE(review): Nx/Ny/Nz (argc <= 3) and Nt (argc <= 4) are left
   * uninitialized when the corresponding arguments are missing — the
   * harness presumably always passes all four; confirm the run scripts. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  $reset_tile_sizes

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // NOTE(review): initialization starts at index 1, so plane/row/column 0
  // of A and coef holds indeterminate values; the final checksum below
  // reads them anyway.
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            // 25-point axis-symmetric stencil: center plus +/-1..4 offsets
            // along each axis, each pair sharing one coefficient array.
            A[(t+1)%2][i][j][k] =
                coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
                coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
                coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
                coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
                coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
                coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
                coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
                coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
                coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
                coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
                coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
                coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
                coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // checksum of the final grid, for correctness comparison between runs
  double total = 0.0;
  for (i = 0; i < Nz; ++i) {
    for (j = 0; j < Ny; ++j) {
      for (k = 0; k < Nx; ++k) {
        total += A[Nt%2][i][j][k];
      }
    }
  }
  printf("Sum(final): %e\n", total);

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. 
SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// Whether this is a constexpr if, or a consteval if, or neither. unsigned Kind : 3; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". 
SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>; }; enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is tail-allocated. unsigned ResultKind : 2; /// The kind of Result as defined by APValue::Kind. 
unsigned APValueKind : 4; /// When ResultKind == RSK_Int64, true if the tail-allocated integer is /// unsigned. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated /// integer. 7 bits because it is the minimal number of bits to represent a /// value from 0 to 64 (the size of the tail-allocated integer). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the /// tail-allocated APValue. unsigned HasCleanup : 1; /// True if this ConstantExpr was created for immediate invocation. unsigned IsImmediateInvocation : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. 
StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; // /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class ArrayOrMatrixSubscriptExprBitfields { friend class ArraySubscriptExpr; friend class MatrixSubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// True if the call expression has some floating-point features. unsigned HasFPFeatures : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 3 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class ASTStmtReader; friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". 
unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// Value of type NonOdrUseReason indicating why this MemberExpr does /// not constitute an odr-use of the named declaration. Meaningful only /// when naming a static member. unsigned NonOdrUseReason : 2; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 7; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// True if the call expression has some floating-point features. unsigned HasFPFeatures : 1; /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. 
unsigned HasFPFeatures : 1;

SourceLocation OpLoc;
};

// NOTE(review): each of the following layout classes begins with an anonymous
// "unsigned : NumExprBits;" (or NumCallExprBits / NumOverloadExprBits) member
// so that its own bits are placed after the bits of the base class it shares
// union storage with. The node-specific bits plus the base bits must fit in a
// single 32-bit word (see e.g. "32 - 8 - NumExprBits" below); widening any
// field here can change sizeof(Stmt) — the Stmt constructor static_asserts
// sizeof(Stmt) <= 8.

class InitListExprBitfields {
  friend class InitListExpr;

  unsigned : NumExprBits;

  /// Whether this initializer list originally had a GNU array-range
  /// designator in it. This is a temporary marker used by CodeGen.
  unsigned HadArrayRangeDesignator : 1;
};

class ParenListExprBitfields {
  friend class ASTStmtReader;
  friend class ParenListExpr;

  unsigned : NumExprBits;

  /// The number of expressions in the paren list.
  unsigned NumExprs;
};

class GenericSelectionExprBitfields {
  friend class ASTStmtReader;
  friend class GenericSelectionExpr;

  unsigned : NumExprBits;

  /// The location of the "_Generic".
  SourceLocation GenericLoc;
};

class PseudoObjectExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class PseudoObjectExpr;

  unsigned : NumExprBits;

  // These don't need to be particularly wide, because they're
  // strictly limited by the forms of expressions we permit.
  unsigned NumSubExprs : 8;
  unsigned ResultIndex : 32 - 8 - NumExprBits;
};

class SourceLocExprBitfields {
  friend class ASTStmtReader;
  friend class SourceLocExpr;

  unsigned : NumExprBits;

  /// The kind of source location builtin represented by the SourceLocExpr.
  /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
  unsigned Kind : 2;
};

class StmtExprBitfields {
  friend class ASTStmtReader;
  friend class StmtExpr;

  unsigned : NumExprBits;

  /// The number of levels of template parameters enclosing this statement
  /// expression. Used to determine if a statement expression remains
  /// dependent after instantiation.
  unsigned TemplateDepth;
};

//===--- C++ Expression bitfields classes ---===//

class CXXOperatorCallExprBitfields {
  friend class ASTStmtReader;
  friend class CXXOperatorCallExpr;

  unsigned : NumCallExprBits;

  /// The kind of this overloaded operator. One of the enumerator
  /// value of OverloadedOperatorKind.
  unsigned OperatorKind : 6;
};

class CXXRewrittenBinaryOperatorBitfields {
  friend class ASTStmtReader;
  friend class CXXRewrittenBinaryOperator;

  unsigned : NumCallExprBits;

  // NOTE(review): presumably records whether the operands were swapped when
  // the operator was rewritten (C++20 comparison rewriting) — confirm against
  // CXXRewrittenBinaryOperator.
  unsigned IsReversed : 1;
};

class CXXBoolLiteralExprBitfields {
  friend class CXXBoolLiteralExpr;

  unsigned : NumExprBits;

  /// The value of the boolean literal.
  unsigned Value : 1;

  /// The location of the boolean literal.
  SourceLocation Loc;
};

class CXXNullPtrLiteralExprBitfields {
  friend class CXXNullPtrLiteralExpr;

  unsigned : NumExprBits;

  /// The location of the null pointer literal.
  SourceLocation Loc;
};

class CXXThisExprBitfields {
  friend class CXXThisExpr;

  unsigned : NumExprBits;

  /// Whether this is an implicit "this".
  unsigned IsImplicit : 1;

  /// The location of the "this".
  SourceLocation Loc;
};

class CXXThrowExprBitfields {
  friend class ASTStmtReader;
  friend class CXXThrowExpr;

  unsigned : NumExprBits;

  /// Whether the thrown variable (if any) is in scope.
  unsigned IsThrownVariableInScope : 1;

  /// The location of the "throw".
  SourceLocation ThrowLoc;
};

class CXXDefaultArgExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultArgExpr;

  unsigned : NumExprBits;

  /// The location where the default argument expression was used.
  SourceLocation Loc;
};

class CXXDefaultInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultInitExpr;

  unsigned : NumExprBits;

  /// The location where the default initializer expression was used.
  SourceLocation Loc;
};

class CXXScalarValueInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXScalarValueInitExpr;

  unsigned : NumExprBits;

  SourceLocation RParenLoc;
};

class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};

class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};

class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait. According to [implimits]
  /// 8 bits would be enough, but we require (and test for) at least 16 bits
  /// to mirror FunctionType.
  unsigned NumArgs;
};

class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};

class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  unsigned : NumExprBits;

  unsigned Elidable : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned ListInitialization : 1;
  unsigned StdInitListInitialization : 1;
  unsigned ZeroInitialization : 1;
  unsigned ConstructionKind : 3;

  SourceLocation Loc;
};

class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  unsigned NumObjects : 32 - 1 - NumExprBits;
};

class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};

class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};

class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };

class UnresolvedLookupExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedLookupExpr;

  unsigned : NumOverloadExprBits;

  /// True if these lookup results should be extended by
  /// argument-dependent lookup if this is the operand of a function call.
  unsigned RequiresADL : 1;

  /// True if these lookup results are overloaded. This is pretty trivially
  /// rederivable if we urgently need to kill this field.
  unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
              "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  unsigned : NumExprBits;

  unsigned Value : 1;
};

class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};

class LambdaExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class LambdaExpr;

  unsigned : NumExprBits;

  /// The default capture kind, which is a value of type
  /// LambdaCaptureDefault.
  unsigned CaptureDefault : 2;

  /// Whether this lambda had an explicit parameter list vs. an
  /// implicit (and empty) parameter list.
  unsigned ExplicitParams : 1;

  /// Whether this lambda had the result type explicitly specified.
  unsigned ExplicitResultType : 1;

  /// The number of captures.
  unsigned NumCaptures : 16;
};

class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  unsigned : NumExprBits;

  unsigned IsSatisfied : 1;

  SourceLocation RequiresKWLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

class CoawaitExprBitfields {
  friend class CoawaitExpr;

  unsigned : NumExprBits;

  unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  unsigned : NumExprBits;

  unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  SourceLocation Loc;
};

union {
  // Same order as in StmtNodes.td.
  // NOTE(review): exactly one member of this union is "active" for a given
  // node, chosen by the node's dynamic class. Every member begins with the
  // base-class bit layout (the anonymous padding members above), which is
  // presumably what makes reading the shared leading bits through StmtBits
  // well-defined — confirm against the common-initial-sequence rules.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // GNU Extensions.
  StmtExprBitfields StmtExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  LambdaExprBitfields LambdaExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};

public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8);

void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) {
  return operator new(bytes, *C, alignment);
}

// Placement new: caller supplies the storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }

// Matching placement-delete overloads. These are only invoked by the
// compiler if a constructor throws during the corresponding placement new;
// they deliberately do nothing because the storage is arena-allocated.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}

public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};

/// The likelihood of a branch being taken.
enum Likelihood {
  LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
  LH_None,          ///< No attribute set or branches of the IfStmt have
                    ///< the same attribute.
  LH_Likely         ///< Branch has the [[likely]] attribute.
};

protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
    : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                  std::random_access_iterator_tag, TPtr> {
  using Base = typename CastIterator::iterator_adaptor_base;

  CastIterator() : Base(nullptr) {}
  CastIterator(StmtPtr *I) : Base(I) {}

  // Dereference casts the stored Stmt* down to T*, tolerating nulls.
  typename Base::value_type operator*() const {
    return cast_or_null<T>(*this->I);
  }
};

/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;

private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;

protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;

Stmt(StmtClass SC) {
  // Guard the bitfield-union layout: the whole Stmt header must stay one
  // 8-byte word (see the bitfield classes above).
  static_assert(sizeof(*this) <= 8,
                "changing bitfields changed sizeof(Stmt)");
  static_assert(sizeof(*this) % alignof(void *) == 0,
                "Insufficient alignment!");
  StmtBits.sClass = SC;
  if (StatisticsEnabled)
    Stmt::addStmtClass(SC);
}

StmtClass getStmtClass() const {
  return static_cast<StmtClass>(StmtBits.sClass);
}

const char *getStmtClassName() const;

/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;

// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();

/// \returns the likelihood of a set of attributes.
static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs);

/// \returns the likelihood of a statement.
static Likelihood getLikelihood(const Stmt *S);

/// \returns the likelihood attribute of a statement.
static const Attr *getLikelihoodAttr(const Stmt *S);

/// \returns the likelihood of the 'then' branch of an 'if' statement. The
/// 'else' branch is required to determine whether both branches specify the
/// same likelihood, which affects the result.
static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

/// \returns whether the likelihood of the branches of an if statement are
/// conflicting. When the first element is \c true there's a conflict and
/// the Attr's are the conflicting attributes of the Then and Else Stmt.
static std::tuple<bool, const Attr *, const Attr *>
determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(raw_ostream &OS, const ASTContext &Context) const;

/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;

/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;

/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, unsigned Indentation = 0,
                 StringRef NewlineSymbol = "\n",
                 const ASTContext *Context = nullptr) const;
void printPrettyControlled(raw_ostream &OS, PrinterHelper *Helper,
                           const PrintingPolicy &Policy,
                           unsigned Indentation = 0,
                           StringRef NewlineSymbol = "\n",
                           const ASTContext *Context = nullptr) const;

/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
               const PrintingPolicy &Policy, bool AddQuotes) const;

/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;

/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
  return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}

const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
  return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}

/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;

using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;

child_range children();

const_child_range children() const {
  // Reuse the non-const overload; the range is re-wrapped as const.
  auto Children = const_cast<Stmt *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }

const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }

/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
             bool Canonical) const;

/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types).
/// Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement covers exactly the ";" token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }

// The statements are stored as a trailing array of Stmt* directly after
// the CompoundStmt object (see the TrailingObjects base).
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;

body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

Stmt *body_back() {
  return !body_empty() ? body_begin()[size() - 1] : nullptr;
}

using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;

body_const_range body() const {
  return body_const_range(body_begin(), body_end());
}

const_body_iterator body_begin() const {
  return getTrailingObjects<Stmt *>();
}

const_body_iterator body_end() const { return body_begin() + size(); }

const Stmt *body_front() const {
  return !body_empty() ? body_begin()[0] : nullptr;
}

const Stmt *body_back() const {
  return !body_empty() ? body_begin()[size() - 1] : nullptr;
}

using reverse_body_iterator = std::reverse_iterator<body_iterator>;

reverse_body_iterator body_rbegin() {
  return reverse_body_iterator(body_end());
}

reverse_body_iterator body_rend() {
  return reverse_body_iterator(body_begin());
}

using const_reverse_body_iterator =
    std::reverse_iterator<const_body_iterator>;

const_reverse_body_iterator body_rbegin() const {
  return const_reverse_body_iterator(body_end());
}

const_reverse_body_iterator body_rend() const {
  return const_reverse_body_iterator(body_begin());
}

// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
//           ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
  // Scan backwards for the last statement that is not a bare ";".
  for (auto *B : llvm::reverse(body())) {
    if (!isa<NullStmt>(B))
      return B;
  }
  return body_back();
}

const Stmt *getStmtExprResult() const {
  return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}

SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }

SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }

static bool classof(const Stmt *T) {
  return T->getStmtClass() == CompoundStmtClass;
}

// Iterators
child_range children() { return child_range(body_begin(), body_end()); }

const_child_range children() const {
  return const_child_range(body_begin(), body_end());
}
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  // Intrusive singly-linked list of the cases of the enclosing switch,
  // threaded through by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line after CaseStmt/DefaultStmt are complete.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case
  //   statement with a range. Present if and only if caseStmtIsGNURange()
  //   is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Indices into the trailing Stmt* array; the RHS slot only exists for
  // GNU range cases, which shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx,
                               bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form
  /// LHS ... RHS. Returns an invalid location for a plain case.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange()
               ? reinterpret_cast<Expr *>(
                     getTrailingObjects<Stmt *>()[rhsOffset()])
               : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange()
               ? reinterpret_cast<Expr *>(
                     getTrailingObjects<Stmt *>()[rhsOffset()])
               : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of
    // recursion.
const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } const_child_range children() const { return const_child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {} Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return getKeywordLoc(); } void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } }; SourceLocation SwitchCase::getEndLoc() const { if (const auto *CS = dyn_cast<CaseStmt>(this)) return CS->getEndLoc(); else if (const auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getEndLoc(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } Stmt *SwitchCase::getSubStmt() { if (auto *CS = dyn_cast<CaseStmt>(this)) return CS->getSubStmt(); else if (auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getSubStmt(); 
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } /// Represents a statement that could possibly have a value and type. This /// covers expression-statements, as well as labels and attributed statements. /// /// Value statements have a special meaning when they are the last non-null /// statement in a GNU statement expression, where they determine the value /// of the statement expression. class ValueStmt : public Stmt { protected: using Stmt::Stmt; public: const Expr *getExprStmt() const; Expr *getExprStmt() { const ValueStmt *ConstThis = this; return const_cast<Expr*>(ConstThis->getExprStmt()); } static bool classof(const Stmt *T) { return T->getStmtClass() >= firstValueStmtConstant && T->getStmtClass() <= lastValueStmtConstant; } }; /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public ValueStmt { LabelDecl *TheDecl; Stmt *SubStmt; bool SideEntry = false; public: /// Build a label statement. LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) { setIdentLoc(IL); } /// Build an empty label statement. 
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; } void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getBeginLoc() const { return getIdentLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } bool isSideEntry() const { return SideEntry; } void setSideEntry(bool SE) { SideEntry = SE; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public ValueStmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt) : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) { AttributedStmtBits.NumAttrs = Attrs.size(); AttributedStmtBits.AttrLoc = Loc; std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : ValueStmt(AttributedStmtClass, Empty) { AttributedStmtBits.NumAttrs = NumAttrs; AttributedStmtBits.AttrLoc = SourceLocation{}; std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; } ArrayRef<const Attr *> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getBeginLoc() const { return getAttrLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. 
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //    Always present.
  //
  // * A "Stmt *" for the else statement.
  //    Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //    Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing "Stmt *" array; each optional slot shifts the
  // later ones by one when present.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, IfStatementKind Kind,
         Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        IfStatementKind Kind, Stmt *Init, VarDecl *Var,
                        Expr *Cond, SourceLocation LPL, SourceLocation RPL,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConsteval() const {
    return getStatementKind() == IfStatementKind::ConstevalNonNegated ||
           getStatementKind() == IfStatementKind::ConstevalNegated;
  }

  bool isNonNegatedConsteval() const {
    return getStatementKind() == IfStatementKind::ConstevalNonNegated;
  }

  bool isNegatedConsteval() const {
    return getStatementKind() == IfStatementKind::ConstevalNegated;
  }

  bool isConstexpr() const {
    return getStatementKind() == IfStatementKind::Constexpr;
  }

  void setStatementKind(IfStatementKind Kind) {
    IfStmtBits.Kind = static_cast<unsigned>(Kind);
  }

  IfStatementKind getStatementKind() const {
    return static_cast<IfStatementKind>(IfStmtBits.Kind);
  }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
  Optional<Stmt *> getNondiscardedCase(const ASTContext &Ctx);

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    // We always store a condition, but there is none for consteval if
    // statements, so skip it.
    return child_range(getTrailingObjects<Stmt *>() +
                           (isConsteval() ? thenOffset() : 0),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    // We always store a condition, but there is none for consteval if
    // statements, so skip it.
    return const_child_range(getTrailingObjects<Stmt *>() +
                                 (isConsteval() ? thenOffset() : 0),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase = nullptr;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing "Stmt *" array, shifted by optional slots.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Cases are pushed at the head, so the list is in reverse source order.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  SourceLocation LParenLoc, RParenLoc;

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators (no children: the label is a declaration, not a sub-statement).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators (leaf node: no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators (leaf node: no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator())
;
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // TrailingObjects protocol: number of trailing VarDecl* slots (0 or 1).
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The returned value may be null for a bare "return;".
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: zero children for "return;", one for "return expr;".
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  // Operand counts; Exprs stores outputs first, then inputs.
  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt(SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Subclasses provide meaningful locations; the base returns invalid ones.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty
  /// (either '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a
  /// "+" constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike
  /// output constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.  Inputs are stored after the outputs in Exprs,
  // hence the "+ NumOutputs" offsets below.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.  Outputs occupy the front of Exprs.

  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }

  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }

  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }

  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  // Children are all operand expressions (outputs followed by inputs).
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Parallel arrays of per-operand data (see FIXME above about merging them).
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    // Only valid for Operand pieces (asserted).
    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  // Input entries follow the outputs in Names/Constraints/Exprs, hence
  // the "i + NumOutputs" indexing below.

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//
  // Labels (for "asm goto") are stored after outputs and inputs.

  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  // Bulk-initialize operand arrays; used by the ASTStmtReader.
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  // Raw token stream of the asm block plus per-operand constraint/clobber
  // strings (MS asm carries plain StringRefs, not StringLiterals).
  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // Braces are optional in MS asm; an invalid LBraceLoc means "no braces".
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  // Inputs follow the outputs in Constraints/Exprs.

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  // Shared initialization used by the constructor and the ASTStmtReader.
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents the __except block of a Structured Exception Handling
/// (SEH) __try statement: a filter expression plus a handler block.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  // Children: the filter expression followed by the handler block.
  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents the __finally block of a Structured Exception Handling
/// (SEH) __try statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block, &Block + 1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a Structured Exception Handling __try statement (also used
/// for C++ 'try' in some Microsoft modes); the handler is either a
/// SEHExceptStmt or a SEHFinallyStmt.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getTryLoc();
  }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  // Children: the try block followed by the handler.
  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators: a __leave statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Variable pointer packed with the 2-bit capture kind.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variables captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: NumCaptures capture-init expressions followed by the
  // captured statement itself, allocated immediately after this object.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(),
                                    capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source locations are delegated to the captured statement itself.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  const_child_range children() const;
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % John Cristy % % October 1996 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/shear.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. 
*/

/*
  Single-byte opcodes that stand in for the two-character operators of the fx
  grammar; AcquireFxInfo() rewrites the expression text so the parser only
  ever sees one byte per operator.  The values 0xf5-0xfd are above the ASCII
  range and therefore cannot collide with user-typed characters.
*/
#define LeftShiftOperator 0xf5
#define RightShiftOperator 0xf6
#define LessThanEqualOperator 0xf7
#define GreaterThanEqualOperator 0xf8
#define EqualOperator 0xf9
#define NotEqualOperator 0xfa
#define LogicalAndOperator 0xfb
#define LogicalOrOperator 0xfc
#define ExponentialNotation 0xfd

struct _FxInfo
{
  const Image
    *images;          /* image list the expression is evaluated against */

  char
    *expression;      /* pre-processed (rewritten) expression text */

  FILE
    *file;            /* destination stream for diagnostic output (stderr) */

  SplayTreeInfo
    *colors,          /* cache of named colors referenced by the expression */
    *symbols;         /* cache of computed symbol/statistic values */

  CacheView
    **view;           /* one pixel cache view per image in the list */

  RandomInfo
    *random_info;     /* entropy source for random values in expressions */

  ExceptionInfo
    *exception;       /* private exception sink for internal queries */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One cache view per image in the list so subexpressions may sample any
    image (u, v, u[n] references) independently.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireCacheView(next);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  if ((strstr(fx_info->expression,"e+") != (char *) NULL) ||
      (strstr(fx_info->expression,"e-") != (char *) NULL))
    {
      /*
        Convert scientific notation: e.g. "1.5e+3" becomes "1.5**10^3",
        evaluated later via the ExponentialNotation operator installed below.
        The substitution is keyed on the digit preceding "e" so that symbol
        names containing "e" are left alone.
      */
      (void) SubstituteString(&fx_info->expression,"0e+","0**10^");
      (void) SubstituteString(&fx_info->expression,"1e+","1**10^");
      (void) SubstituteString(&fx_info->expression,"2e+","2**10^");
      (void) SubstituteString(&fx_info->expression,"3e+","3**10^");
      (void) SubstituteString(&fx_info->expression,"4e+","4**10^");
      (void) SubstituteString(&fx_info->expression,"5e+","5**10^");
      (void) SubstituteString(&fx_info->expression,"6e+","6**10^");
      (void) SubstituteString(&fx_info->expression,"7e+","7**10^");
      (void) SubstituteString(&fx_info->expression,"8e+","8**10^");
      (void) SubstituteString(&fx_info->expression,"9e+","9**10^");
      (void) SubstituteString(&fx_info->expression,"0e-","0**10^-");
      (void) SubstituteString(&fx_info->expression,"1e-","1**10^-");
      (void) SubstituteString(&fx_info->expression,"2e-","2**10^-");
      (void) SubstituteString(&fx_info->expression,"3e-","3**10^-");
      (void) SubstituteString(&fx_info->expression,"4e-","4**10^-");
      (void) SubstituteString(&fx_info->expression,"5e-","5**10^-");
      (void) SubstituteString(&fx_info->expression,"6e-","6**10^-");
      (void) SubstituteString(&fx_info->expression,"7e-","7**10^-");
      (void) SubstituteString(&fx_info->expression,"8e-","8**10^-");
      (void) SubstituteString(&fx_info->expression,"9e-","9**10^-");
    }
  /*
    Force right-to-left associativity for unary negation: rewriting "-" as
    "-1.0*" turns "-x" into "-1.0*x"; a binary "a-b" becomes "a-1.0*b",
    which evaluates to the same value since "*" binds tighter.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  /*
    Convert complex to simple operators.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d d N o i s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddNoiseImage() adds random noise to the image.
%
%  The format of the AddNoiseImage method is:
%
%      Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
%        ExceptionInfo *exception)
%      Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
%        const NoiseType noise_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o noise_type:  The type of noise: Uniform, Gaussian, Multiplicative,
%      Impulse, Laplacian, or Poisson.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: adds noise to all default channels. */
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  Image
    *noise_image;

  noise_image=AddNoiseImageChannel(image,DefaultChannels,noise_type,exception);
  return(noise_image);
}

MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  const char
    *option;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    attenuate;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  The "attenuate" artifact scales the noise
    amplitude; it defaults to 1.0 when absent.
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option);
  status=MagickTrue;
  progress=0;
  /*
    One RandomInfo per thread, indexed below by the OpenMP thread id, so
    parallel rows never share a generator.
  */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireCacheView(image);
  noise_view=AcquireCacheView(noise_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict noise_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Each selected channel gets independently generated noise. */
      if ((channel & RedChannel) != 0)
        q->red=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->red,noise_type,attenuate));
      if ((channel & GreenChannel) != 0)
        q->green=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->green,noise_type,attenuate));
      if ((channel & BlueChannel) != 0)
        q->blue=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->blue,noise_type,attenuate));
      if ((channel & OpacityChannel) != 0)
        q->opacity=ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          p->opacity,noise_type,attenuate));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        noise_indexes[x]=(IndexPacket) ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],indexes[x],noise_type,attenuate));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  shift_view=AcquireCacheView(shift_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel;

    Quantum
      quantum;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First blend each channel toward the minimum of the three channels,
        then toward the maximum, each scaled by the shift factor.
      */
      quantum=GetRedPixelComponent(p);
      if (p->green < quantum)
        quantum=GetGreenPixelComponent(p);
      if (p->blue < quantum)
        quantum=GetBluePixelComponent(p);
      pixel.red=0.5*(p->red+factor*quantum);
      pixel.green=0.5*(p->green+factor*quantum);
      pixel.blue=0.5*(p->blue+factor*quantum);
      quantum=GetRedPixelComponent(p);
      if (p->green > quantum)
        quantum=GetGreenPixelComponent(p);
      if (p->blue > quantum)
        quantum=GetBluePixelComponent(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
      SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
      SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *charcoal_image,
    *edge_image,
    *grayscale_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Work on a grayscale copy so the caller's image is left untouched.
  */
  grayscale_image=CloneImage(image,0,0,MagickTrue,exception);
  if (grayscale_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageType(grayscale_image,GrayscaleType);
  /*
    Trace the strokes with an edge detector, then soften them with a blur.
  */
  edge_image=EdgeImage(grayscale_image,radius,exception);
  grayscale_image=DestroyImage(grayscale_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  charcoal_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (charcoal_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Stretch the contrast and invert: dark strokes on a light background.
  */
  (void) NormalizeImage(charcoal_image);
  (void) NegateImage(charcoal_image,MagickFalse);
  (void) SetImageType(charcoal_image,GrayscaleType);
  return(charcoal_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *opacity,
%        const PixelPacket colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity:  A character string indicating the level of opacity as a
%      percentage.
%
%    o colorize: A color value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"

  CacheView
    *colorize_view,
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    pixel;          /* per-channel blend percentages parsed from 'opacity' */

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* No opacity given: return the untinted clone. */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  A single percentage applies to
    all channels; sigma/xi/psi override green, blue, and opacity in turn.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  colorize_view=AcquireCacheView(colorize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Weighted average of source pixel and fill color, per channel. */
      q->red=(Quantum) ((p->red*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0);
      q->green=(Quantum) ((p->green*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0);
      q->blue=(Quantum) ((p->blue*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0);
      q->opacity=(Quantum) ((p->opacity*(100.0-pixel.opacity)+
        colorize.opacity*pixel.opacity)/100.0);
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r M a t r i x I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorMatrixImage() applies color transformation to an image.
This method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be used,
%  typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).  The matrix is similar to those used by Adobe Flash
%  except offsets are in column 6 rather than 5 (in support of CMYKA images)
%  and offsets are normalized (divide Flash offset by 255).
%
%  The format of the ColorMatrixImage method is:
%
%      Image *ColorMatrixImage(const Image *image,
%        const KernelInfo *color_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_matrix:  the color matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /* Identity transform; user values are copied over its top-left corner. */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Create color matrix.  Rows/columns beyond the 6x6 window are skipped but
    still consume entries of color_matrix->values.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatMagickString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatMagickString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  color_view=AcquireCacheView(color_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register IndexPacket
      *restrict color_indexes;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        /*
          Row v of the matrix produces output channel v: columns 0-2 weight
          RGB, column 3 weights alpha (stored inverted as opacity), column 4
          weights the CMYK black channel, column 5 is a normalized offset.
        */
        pixel=ColorMatrix[v][0]*p->red+ColorMatrix[v][1]*p->green+
          ColorMatrix[v][2]*p->blue;
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-p->opacity);
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*indexes[x];
        pixel+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: q->red=ClampToQuantum(pixel); break;
          case 1: q->green=ClampToQuantum(pixel); break;
          case 2: q->blue=ClampToQuantum(pixel); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              q->opacity=ClampToQuantum(QuantumRange-pixel);
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              color_indexes[x]=ClampToQuantum(pixel);
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    n;

  /*
    Tear down every resource acquired by AcquireFxInfo(), then release the
    structure itself and return the relinquished pointer so callers can
    overwrite their handle.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  n=(ssize_t) GetImageListLength(fx_info->images);
  while (--n >= 0)
    fx_info->view[n]=DestroyCacheView(fx_info->view[n]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      MagickRealType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        MagickRealType *alpha,Exceptioninfo *exception)
%      MagickRealType FxEvaluateExpression(FxInfo *fx_info,
%        MagickRealType *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType FxChannelStatistics(FxInfo *fx_info,const Image *image, ChannelType channel,const char *symbol,ExceptionInfo *exception) { char key[MaxTextExtent], statistic[MaxTextExtent]; const char *value; register const char *p; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') switch (*++p) /* e.g. depth.r */ { case 'r': channel=RedChannel; break; case 'g': channel=GreenChannel; break; case 'b': channel=BlueChannel; break; case 'c': channel=CyanChannel; break; case 'm': channel=MagentaChannel; break; case 'y': channel=YellowChannel; break; case 'k': channel=BlackChannel; break; default: break; } (void) FormatMagickString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) return(QuantumScale*StringToDouble(value)); (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageChannelDepth(image,channel,exception); (void) FormatMagickString(statistic,MaxTextExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g",mean); } if 
(LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); (void) FormatMagickString(statistic,MaxTextExtent,"%g", standard_deviation); } (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic)); } static MagickRealType FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t, const ssize_t,const char *,MagickRealType *,ExceptionInfo *); static inline MagickRealType FxMax(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { MagickRealType alpha, beta; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&beta,exception); return((MagickRealType) MagickMax((double) alpha,(double) beta)); } static inline MagickRealType FxMin(FxInfo *fx_info,ChannelType channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { MagickRealType alpha, beta; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression,&beta,exception); return((MagickRealType) MagickMin((double) alpha,(double) beta)); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if 
(strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static MagickRealType FxGetSymbol(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MaxTextExtent], symbol[MaxTextExtent]; const char *p, *value; Image *image; InterpolatePixelMethod interpolate_method; MagickPixelPacket pixel; MagickRealType alpha, beta; PointInfo point; register ssize_t i; size_t length; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) *(p+1)) == 0) { if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &beta,exception); i=(ssize_t) (alpha+0.5); p++; } if (*p == '.') p++; } if ((isalpha((int) *(p+1)) == 0) && (*p == 'p')) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &beta,exception); point.x=alpha; point.y=beta; p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &beta,exception); point.x+=alpha; point.y+=beta; p++; } if (*p == '.') p++; } } 
length=GetImageListLength(fx_info->images); while (i < 0) i+=(ssize_t) length; i%=length; image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } interpolate_method=image->interpolate == UndefinedInterpolatePixel ? NearestNeighborInterpolatePixel : image->interpolate; (void) InterpolateMagickPixelPacket(image,fx_info->view[i],interpolate_method, point.x,point.y,&pixel,exception); if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MaxTextExtent]; (void) CopyMagickString(name,p,MaxTextExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { MagickPixelPacket *color; color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors, name); if (color != (MagickPixelPacket *) NULL) { pixel=(*color); p+=strlen(name); } else if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString(name), CloneMagickPixelPacket(&pixel)); p+=strlen(name); } } } (void) CopyMagickString(symbol,p,MaxTextExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedChannel: return(QuantumScale*pixel.red); case GreenChannel: return(QuantumScale*pixel.green); case BlueChannel: return(QuantumScale*pixel.blue); case OpacityChannel: { MagickRealType alpha; if (pixel.matte == MagickFalse) return(1.0); alpha=(MagickRealType) (QuantumScale*GetAlphaPixelComponent(&pixel)); return(alpha); } case IndexChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", 
image->filename); return(0.0); } return(QuantumScale*pixel.index); } case DefaultChannels: { return(QuantumScale*MagickPixelIntensityToQuantum(&pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((MagickRealType) (QuantumScale*GetAlphaPixelComponent(&pixel))); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case OpacityChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BlueChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case OpacityChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case IndexChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } return(0.0); } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } 
case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.index); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((MagickRealType) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->y_resolution); if (LocaleCompare(symbol,"intensity") == 0) return(QuantumScale*MagickPixelIntensityToQuantum(&pixel)); if (LocaleCompare(symbol,"i") == 0) return((MagickRealType) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((MagickRealType) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(lightness); } if (LocaleCompare(symbol,"luminance") == 0) { 
double luminence; luminence=0.2126*pixel.red+0.7152*pixel.green+0.0722*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.blue); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((MagickRealType) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.opacity); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((MagickRealType) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((MagickRealType) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((MagickRealType) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((MagickRealType) image->page.y); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->y_resolution); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) 
return((MagickRealType) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((MagickRealType) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          MagickRealType
            depth;

          depth=(MagickRealType) GetImageChannelDepth(image,channel,
            fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in symbol: fall back to a user-defined variable previously
    stored in the FxInfo symbol table by the '=' operator of
    FxEvaluateSubexpression().
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return((MagickRealType) StringToDouble(value));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}

/*
  FxOperatorPrecedence() scans `expression' and returns a pointer to the
  operator at which the expression should be split (the last operator of the
  lowest binding precedence at bracket level 0), or NULL when the expression
  contains no splittable operator.  FxEvaluateSubexpression() uses the
  returned pointer to divide the expression into left and right operands.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  /* c holds the previous significant character (0 before the first one). */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    /* NOTE(review): *expression reaches isspace() via a (char) cast; the
       ctype functions expect an unsigned-char value -- assumes plain ASCII
       input, confirm. */
    if ((isspace((int) ((char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over function names whose spelling embeds an operator-like
      character (the digit in atan2/j0/j1) and over #rrggbb hex constants so
      they are not mistaken for operators.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implied multiplication (e.g. "2u" or ")(" ): the previous token
            ended a value and the current character starts a new one.  The
            default label is deliberately placed before the remaining cases;
            in C, case order within a switch is immaterial.
          */
          if (((c != 0) && ((isdigit((int) ((char) c)) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((char) *expression)) != 0) ||
               (strchr("(",(int) *expression) != (char *) NULL)) ||
               ((isdigit((int) ((char) c)) == 0) &&
                (isdigit((int) ((char) *expression)) != 0))) &&
              (strchr("xy",(int) *expression) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* Only binary +/- counts; a sign following another operator or an
             opening bracket is part of the operand. */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates `expression' for `channel'
  at pixel (x,y).  The expression is split at the operator located by
  FxOperatorPrecedence(); otherwise it is interpreted as a parenthesized
  subexpression, a unary operator, a built-in function, a numeric literal,
  or a symbol.  Some binary operators also deposit their right operand in
  *beta for callers (e.g. atan2, pow) that consume "a,b" argument pairs.
*/
static MagickRealType FxEvaluateSubexpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,
  const char *expression,MagickRealType *beta,ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent];

  MagickRealType
    alpha,
    gamma;

  register const char
    *p;

  *beta=0.0;
  /* Bail out early once any error has been recorded. */
  if (exception->severity != UndefinedException)
    return(0.0);
  while (isspace((int) *expression) != 0)
    expression++;
  if (*expression == '\0')
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "MissingExpression","`%s'",expression);
      return(0.0);
    }
  *subexpression='\0';
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Binary operator found: evaluate the left-hand side first, then
        dispatch on the operator; the right-hand side is evaluated inside
        each case (note the ++p skipping the operator character).
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
        exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(MagickRealType) (~(size_t) *beta);
          return(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(*beta == 0.0 ?
1.0 : 0.0); }
        case '^':
        {
          *beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info,
            channel,x,y,++p,beta,exception));
          return(*beta);
        }
        case '*':
        case ExponentialNotation:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha*(*beta));
        }
        case '/':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          if (*beta == 0.0)
            {
              if (exception->severity == UndefinedException)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(alpha/(*beta));
        }
        case '%':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          /* The divisor is rounded to the nearest integer before fmod(). */
          *beta=fabs(floor(((double) *beta)+0.5));
          if (*beta == 0.0)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"DivideByZero","`%s'",expression);
              return(0.0);
            }
          return(fmod((double) alpha,(double) *beta));
        }
        case '+':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha+(*beta));
        }
        case '-':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha-(*beta));
        }
        case LeftShiftOperator:
        {
          /* Shift and bitwise cases round both operands to integers. */
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(MagickRealType) ((size_t) (alpha+0.5) << (size_t)
            (gamma+0.5));
          return(*beta);
        }
        case RightShiftOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(MagickRealType) ((size_t) (alpha+0.5) >> (size_t)
            (gamma+0.5));
          return(*beta);
        }
        case '<':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha < *beta ? 1.0 : 0.0);
        }
        case LessThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha <= *beta ? 1.0 : 0.0);
        }
        case '>':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha > *beta ? 1.0 : 0.0);
        }
        case GreaterThanEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha >= *beta ? 1.0 : 0.0);
        }
        case EqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          /* Equality is fuzzy: equal within MagickEpsilon. */
          return(fabs(alpha-(*beta)) <= MagickEpsilon ? 1.0 : 0.0);
        }
        case NotEqualOperator:
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(fabs(alpha-(*beta)) > MagickEpsilon ? 1.0 : 0.0);
        }
        case '&':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(MagickRealType) ((size_t) (alpha+0.5) & (size_t)
            (gamma+0.5));
          return(*beta);
        }
        case '|':
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(MagickRealType) ((size_t) (alpha+0.5) | (size_t)
            (gamma+0.5));
          return(*beta);
        }
        case LogicalAndOperator:
        {
          /* NOTE(review): the right-hand side is always evaluated -- && and
             || do not short-circuit here. */
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(alpha > 0.0) && (gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case LogicalOrOperator:
        {
          gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          *beta=(alpha > 0.0) || (gamma > 0.0) ? 1.0 : 0.0;
          return(*beta);
        }
        case '?':
        {
          MagickRealType
            gamma;

          /*
            Ternary: split "condition ? then : else" on ':' with
            StringToken() and evaluate only the selected arm.
          */
          (void) CopyMagickString(subexpression,++p,MaxTextExtent);
          q=subexpression;
          p=StringToken(":",&q);
          if (q == (char *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          if (fabs((double) alpha) > MagickEpsilon)
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,
              exception);
          else
            gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,beta,
              exception);
          return(gamma);
        }
        case '=':
        {
          char
            numeric[MaxTextExtent];

          /*
            Assignment: the left-hand side must be purely alphabetic; the
            value is stored as a string in the symbol table where
            FxGetSymbol() reads it back.
          */
          q=subexpression;
          while (isalpha((int) ((unsigned char) *q)) != 0)
            q++;
          if (*q != '\0')
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"UnableToParseExpression","`%s'",subexpression);
              return(0.0);
            }
          ClearMagickException(exception);
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          (void) FormatMagickString(numeric,MaxTextExtent,"%g",(double)
            *beta);
          (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
          (void) AddValueToSplayTree(fx_info->symbols,ConstantString(
            subexpression),ConstantString(numeric));
          return(*beta);
        }
        case ',':
        {
          /* Comma evaluates both sides but yields the LEFT value; the right
             value is available to the caller via *beta. */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(alpha);
        }
        case ';':
        {
          /* Statement separator: yields the RIGHT value. */
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,beta,
            exception);
          return(*beta);
        }
        default:
        {
          /* No explicit operator character: implied multiplication. */
          gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,beta,
            exception);
          return(gamma);
        }
      }
    }
  if (strchr("(",(int) *expression) != (char *) NULL)
    {
      /* Parenthesized subexpression: strip the outer parentheses. */
      (void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
      subexpression[strlen(subexpression)-1]='\0';
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,beta,
        exception);
      return(gamma);
    }
  /*
    Unary operators and built-in functions, dispatched on the first
    character; unmatched names fall through to the literal/symbol handling
    at the bottom of the function.
  */
  switch (*expression)
  {
    case '+':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return(1.0*gamma);
    }
    case '-':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return(-1.0*gamma);
    }
    case '~':
    {
      gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,beta,
        exception);
      return((MagickRealType) (~(size_t) (gamma+0.5)));
    }
    case 'A':
    case 'a':
    {
      if (LocaleNCompare(expression,"abs",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) fabs((double) alpha));
        }
      if (LocaleNCompare(expression,"acos",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) acos((double) alpha));
        }
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"airy",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha);
          return(gamma*gamma);
        }
#endif
      if (LocaleNCompare(expression,"asin",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) asin((double) alpha));
        }
      if (LocaleNCompare(expression,"alt",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"atan2",5) == 0)
        {
          /* The argument pair "a,b" leaves b in *beta (comma operator). */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) atan2((double) alpha,(double) *beta));
        }
      if (LocaleNCompare(expression,"atan",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) atan((double) alpha));
        }
      if (LocaleCompare(expression,"a") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(expression,"b") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(expression,"ceil",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) ceil((double) alpha));
        }
      if (LocaleNCompare(expression,"cosh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) cosh((double) alpha));
        }
      if (LocaleNCompare(expression,"cos",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) cos((double) alpha));
        }
      if (LocaleCompare(expression,"c") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(expression,"debug",5) == 0)
        {
          const char
            *type;

          /*
            debug(x): evaluate x and, when a debug file is attached, log
            "filename[x,y].channel: expression=value"; always yields 0.
          */
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          if (fx_info->images->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel: type="cyan"; break;
              case MagentaChannel: type="magenta"; break;
              case YellowChannel: type="yellow"; break;
              case OpacityChannel: type="opacity"; break;
              case BlackChannel: type="black"; break;
              default: type="unknown"; break;
            }
          else
            switch (channel)
            {
              case RedChannel: type="red"; break;
              case GreenChannel: type="green"; break;
              case BlueChannel: type="blue"; break;
              case OpacityChannel: type="opacity"; break;
              default: type="unknown"; break;
            }
          (void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
          if (strlen(subexpression) > 1)
            subexpression[strlen(subexpression)-1]='\0';
          if (fx_info->file != (FILE *) NULL)
            (void) fprintf(fx_info->file,"%s[%.20g,%.20g].%s: %s=%.*g\n",
              fx_info->images->filename,(double) x,(double) y,type,
              subexpression,GetMagickPrecision(),(double) alpha);
          return(0.0);
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(expression,"epsilon") == 0)
        return((MagickRealType) MagickEpsilon);
      if (LocaleNCompare(expression,"exp",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) exp((double) alpha));
        }
      if (LocaleCompare(expression,"e") == 0)
        return((MagickRealType) 2.7182818284590452354);
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) floor((double) alpha));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) hypot((double) alpha,(double) *beta));
        }
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) floor(alpha));
        }
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return((MagickRealType) j0((double) alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return((MagickRealType) j1((double) alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=(MagickRealType) (2.0*j1((double) (MagickPI*alpha))/
            (MagickPI*alpha));
          return(gamma);
        }
#endif
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,beta,
            exception);
          return((MagickRealType) log((double) alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,beta,
            exception);
          /* Parenthesization looks odd but C parses this as
             return (log10(alpha)/log10(2.0)), i.e. log base 2. */
          return((MagickRealType) log10((double) alpha))/log10(2.0);
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) log10((double) alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return((MagickRealType) QuantumRange);
      /* "maxima"/"minima" are channel statistics handled by FxGetSymbol();
         break here so they are not mistaken for max()/min(). */
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        return(FxMax(fx_info,channel,x,y,expression+3,exception));
      if (LocaleNCompare(expression,"minima",6) == 0)
        break;
      if (LocaleNCompare(expression,"min",3) == 0)
        return(FxMin(fx_info,channel,x,y,expression+3,exception));
      if (LocaleNCompare(expression,"mod",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) fmod((double) alpha,(double) *beta));
        }
      if (LocaleCompare(expression,"m") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(expression,"n") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(expression,"Opaque") == 0)
        return(1.0);
      if (LocaleCompare(expression,"o") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(expression,"pi") == 0)
        return((MagickRealType) MagickPI);
      if (LocaleNCompare(expression,"pow",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) pow((double) alpha,(double) *beta));
        }
      if (LocaleCompare(expression,"p") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(expression,"QuantumRange") == 0)
        return((MagickRealType) QuantumRange);
      if (LocaleCompare(expression,"QuantumScale") == 0)
        return((MagickRealType) QuantumScale);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleNCompare(expression,"rand",4) == 0)
        return((MagickRealType) GetPseudoRandomValue(fx_info->random_info));
      if (LocaleNCompare(expression,"round",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          return((MagickRealType) floor((double) alpha+0.5));
        }
      if (LocaleCompare(expression,"r") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(expression,"saturation") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"sign",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return(alpha < 0.0 ? -1.0 : 1.0);
        }
      if (LocaleNCompare(expression,"sinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          if (alpha == 0)
            return(1.0);
          gamma=(MagickRealType) (sin((double) (MagickPI*alpha))/
            (MagickPI*alpha));
          return(gamma);
        }
      if (LocaleNCompare(expression,"sinh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) sinh((double) alpha));
        }
      if (LocaleNCompare(expression,"sin",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) sin((double) alpha));
        }
      if (LocaleNCompare(expression,"sqrt",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) sqrt((double) alpha));
        }
      if (LocaleCompare(expression,"s") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleNCompare(expression,"tanh",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,beta,
            exception);
          return((MagickRealType) tanh((double) alpha));
        }
      if (LocaleNCompare(expression,"tan",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,beta,
            exception);
          return((MagickRealType) tan((double) alpha));
        }
      if (LocaleCompare(expression,"Transparent") == 0)
        return(0.0);
      if (LocaleNCompare(expression,"trunc",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,beta,
            exception);
          if (alpha >= 0.0)
            return((MagickRealType) floor((double) alpha));
          return((MagickRealType) ceil((double) alpha));
        }
      if (LocaleCompare(expression,"t") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not an operator, parenthesis, or known function name: try a numeric
    literal, then fall back to the symbol table.
  */
  q=(char *) expression;
  alpha=strtod(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}

/*
  FxEvaluateExpression() evaluates the expression for the gray channel at
  pixel (0,0); the result is returned in *alpha.
*/
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  return(status);
}

/*
  FxPreprocessExpression() evaluates the expression once with debug output
  suppressed (fx_info->file is temporarily set to NULL) so parse errors are
  reported up front without emitting debug() traces.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates fx_info->expression for `channel'
  at pixel (x,y); the result is returned in *alpha.  Returns MagickFalse
  only when an OptionError (parse failure) was recorded in `exception'.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,
  MagickRealType *alpha,ExceptionInfo *exception)
{
  MagickRealType
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&beta,
    exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%      Image *FxImageChannel(const Image *image,const ChannelType channel,
%        const char *expression,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyFxThreadSet() releases the per-thread FxInfo structures and the
  array that holds them; always returns NULL.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet() allocates one FxInfo per OpenMP thread so each row of
  the parallel pixel loop can evaluate the expression independently.  A
  leading '@' in `expression' means "read the expression from this file".
  NOTE(review): on AcquireFxInfo() failure, fx_expression is not destroyed
  before returning -- looks like a small leak on the error path; confirm.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  MagickRealType
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    return((FxInfo **) NULL);
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      return(DestroyFxThreadSet(fx_info));
    (void) FxPreprocessExpression(fx_info[i],&alpha,fx_info[i]->exception);
  }
  fx_expression=DestroyString(fx_expression);
  return(fx_info);
}

/*
  FxImage() is a convenience wrapper: apply the expression to the gray
  channel.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  Image
    *fx_image;

  fx_image=FxImageChannel(image,GrayChannel,expression,exception);
  return(fx_image);
}

MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    alpha;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    {
      fx_image=DestroyImage(fx_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Validate the expression once up front; bail before the pixel loop. */
  status=FxPreprocessExpression(fx_info[0],&alpha,exception);
  if (status == MagickFalse)
    {
      fx_image=DestroyImage(fx_image);
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression once per selected channel per pixel.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireCacheView(fx_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    /* Each thread uses its own FxInfo (symbol table, random state). */
    const int
      id = GetOpenMPThreadId();

    MagickRealType
      alpha;

    register IndexPacket
      *restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          q->red=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          q->green=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          q->blue=ClampToQuantum((MagickRealType) QuantumRange*alpha);
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /* For matte images the expression yields alpha, so invert it to
             the opacity stored in the pixel. */
          if (image->matte == MagickFalse)
            q->opacity=ClampToQuantum((MagickRealType) QuantumRange*alpha);
          else
            q->opacity=ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha));
        }
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          fx_indexes[x]=(IndexPacket) ClampToQuantum((MagickRealType)
            QuantumRange*alpha);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_FxImageChannel)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor: normalize to the shorter image dimension so the
    effect region is an inscribed ellipse of radius `radius'.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireCacheView(image);
  implode_view=AcquireCacheView(implode_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    MagickRealType
      distance;

    PointInfo
      delta;

    register IndexPacket
      *restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a position pulled toward
            (or pushed away from) the center by a power of the pixel's
            normalized distance.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames: Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image, const size_t number_frames,ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; MagickRealType alpha, beta; register const Image *next; register ssize_t i; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (i=1; i < (ssize_t) number_frames; i++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (i=0; i < (ssize_t) number_frames; i++) { CacheView *image_view, *morph_view; beta=(MagickRealType) (i+1.0)/(MagickRealType) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha* next->rows+beta*GetNextImageInList(next)->rows+0.5), next->filter,next->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse) { InheritException(exception,&morph_image->exception); morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter, GetNextImageInList(next)->blur,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireCacheView(morph_image); morph_view=AcquireCacheView(morph_images); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { q->red=ClampToQuantum(alpha*q->red+beta*GetRedPixelComponent(p)); q->green=ClampToQuantum(alpha*q->green+beta* 
GetGreenPixelComponent(p)); q->blue=ClampToQuantum(alpha*q->blue+beta*GetBluePixelComponent(p)); q->opacity=ClampToQuantum(alpha*q->opacity+beta* GetOpacityPixelComponent(p)); p++; q++; } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (i < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. 
% % o depth: Limit the plasma recursion depth. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const MagickRealType pixel,const MagickRealType noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); return(plasma); } MagickExport MagickBooleanType PlasmaImageProxy(Image *image, CacheView *image_view,RandomInfo *random_info,const SegmentInfo *segment, size_t attenuate,size_t depth) { ExceptionInfo *exception; MagickRealType plasma; PixelPacket u, v; ssize_t x, x_mid, y, y_mid; if (((segment->x2-segment->x1) == 0.0) && ((segment->y2-segment->y1) == 0.0)) return(MagickTrue); if (depth != 0) { SegmentInfo local_info; /* Divide the area into quadrants and recurse. */ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; return(PlasmaImageProxy(image,image_view,random_info,&local_info, attenuate,depth)); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((segment->x1 == (double) x_mid) && (segment->x2 == (double) x_mid) && (segment->y1 == (double) y_mid) && (segment->y2 == (double) y_mid)) return(MagickFalse); /* Average pixels and apply plasma. 
*/ exception=(&image->exception); plasma=(MagickRealType) QuantumRange/(2.0*attenuate); if ((segment->x1 != (double) x_mid) || (segment->x2 != (double) x_mid)) { register PixelPacket *restrict q; /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma); q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0, plasma); q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0, plasma); (void) SyncCacheViewAuthenticPixels(image_view,exception); if (segment->x1 != segment->x2) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y1-0.5),&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,x,(ssize_t) ceil(segment->y2-0.5),&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma); q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/ 2.0,plasma); q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0, plasma); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((segment->y1 != (double) y_mid) || (segment->y2 != (double) y_mid)) { if ((segment->x1 != (double) x_mid) || (segment->y2 != (double) y_mid)) { register PixelPacket *restrict q; /* Bottom pixel. 
*/ y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma); q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/ 2.0,plasma); q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0, plasma); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (segment->y1 != segment->y2) { register PixelPacket *restrict q; /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x1-0.5),y,&u,exception); (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) ceil(segment->x2-0.5),y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma); q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/ 2.0,plasma); q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0, plasma); (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((segment->x1 != segment->x2) || (segment->y1 != segment->y2)) { register PixelPacket *restrict q; /* Middle pixel. 
*/ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); (void) GetOneVirtualPixel(image,x,y,&u,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); (void) GetOneCacheViewVirtualPixel(image_view,x,y,&v,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickTrue); q->red=PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0, plasma); q->green=PlasmaPixel(random_info,(MagickRealType) (u.green+v.green)/2.0, plasma); q->blue=PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/2.0, plasma); (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (((segment->x2-segment->x1) < 3.0) && ((segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth) { CacheView *image_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image_view=AcquireCacheView(image); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,random_info,segment,attenuate,depth); random_info=DestroyRandomInfo(random_info); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l a r o i d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolaroidImage() simulates a Polaroid picture. 
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.  quantum is the white-border thickness,
    derived from the larger image dimension (minimum 10 pixels).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  /* The "Caption" image property, when present, is typeset below the photo. */
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];

      DrawInfo
        *annotate_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      /* count = number of wrapped caption lines; sizes the caption strip. */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &caption);
      status=SetImageExtent(caption_image,image->columns,(size_t)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatMagickString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /*
    Compose the photo (and caption) onto the bordered "picture" canvas.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Rotate 90, wave, rotate back: gives the picture a gentle horizontal bend.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, composite the picture over it, then rotate and trim.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.
Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image: each channel gets a shifted copy of the
    pixel intensity, offset by a fraction of the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  sepia_view=AcquireCacheView(sepia_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity,
        tone;

      intensity=(MagickRealType) PixelIntensityToQuantum(p);
      tone=intensity > threshold ? (MagickRealType) QuantumRange : intensity+
        (MagickRealType) QuantumRange-threshold;
      q->red=ClampToQuantum(tone);
      tone=intensity > (7.0*threshold/6.0) ? (MagickRealType) QuantumRange :
        intensity+(MagickRealType) QuantumRange-7.0*threshold/6.0;
      q->green=ClampToQuantum(tone);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      q->blue=ClampToQuantum(tone);
      /* Floor green/blue at threshold/7 so shadows keep a warm cast. */
      tone=threshold/7.0;
      if ((MagickRealType) q->green < tone)
        q->green=ClampToQuantum(tone);
      if ((MagickRealType) q->blue < tone)
        q->blue=ClampToQuantum(tone);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double opacity,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pad the image with a transparent border wide enough (2*sigma) for the
    blurred shadow to fall entirely inside the canvas.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten color to the background and scale alpha by the
    requested opacity percentage.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(border_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      q->red=border_image->background_color.red;
      q->green=border_image->background_color.green;
      q->blue=border_image->background_color.blue;
      if (border_image->matte == MagickFalse)
        q->opacity=border_image->background_color.opacity;
      else
        q->opacity=ClampToQuantum((MagickRealType) (QuantumRange-
          GetAlphaPixelComponent(q)*opacity/100.0));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Soften the silhouette by blurring only the alpha channel. */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /* Shift the page geometry so the shadow lands at the requested offset. */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SketchImage() selects a suitable radius for you.  Angle gives the angle
%  of the sketch.
%
%  The format of the SketchImage method is:
%
%    Image *SketchImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  RandomInfo
    **restrict random_info;

  ssize_t
    y;

  /*
    Sketch image: start from a double-size gray-noise field that will be
    motion-blurred along the sketch angle.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  GetMagickPixelPacket(random_image,&zero);
  /* One RandomInfo per thread keeps the parallel fill reproducible. */
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireCacheView(random_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(random_view);
    pixel=zero;
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      pixel.red=(MagickRealType) (QuantumRange*
        GetPseudoRandomValue(random_info[id]));
      pixel.green=pixel.red;
      pixel.blue=pixel.red;
      if (image->colorspace == CMYKColorspace)
        pixel.index=pixel.red;
      SetPixelPacket(random_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Blur the noise along the angle, edge-detect, then invert and compress to
    obtain the pencil-stroke texture (dodge_image).
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  /* Blend 20% of the original image back in to retain some color. */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S o l a r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SolarizeImage() applies a special effect to the image, similar to the effect
%  achieved in a photo darkroom by selectively exposing areas of photo
%  sensitive paper to light.  Threshold ranges from 0 to QuantumRange and is a
%  measure of the extent of the solarization.
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold:  Define the extent of the solarization.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.
        NOTE(review): the pixel loop below still runs for PseudoClass images
        after the colormap is solarized — presumably relying on the pixel
        cache staying in sync with the colormap; verify before changing.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((MagickRealType) image->colormap[i].red > threshold)
          image->colormap[i].red=(Quantum) QuantumRange-image->colormap[i].red;
        if ((MagickRealType) image->colormap[i].green > threshold)
          image->colormap[i].green=(Quantum) QuantumRange-
            image->colormap[i].green;
        if ((MagickRealType) image->colormap[i].blue > threshold)
          image->colormap[i].blue=(Quantum) QuantumRange-
            image->colormap[i].blue;
      }
    }
  /*
    Solarize image: invert each channel value that exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((MagickRealType) q->red > threshold)
        q->red=(Quantum) QuantumRange-q->red;
      if ((MagickRealType) q->green > threshold)
        q->green=(Quantum) QuantumRange-q->green;
      if ((MagickRealType) q->blue > threshold)
        q->blue=(Quantum) QuantumRange-q->blue;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.
Recover % the hidden watermark later to prove that the authenticity of an image. % Offset defines the start position within the image to hide the watermark. % % The format of the SteganoImage method is: % % Image *SteganoImage(const Image *image,Image *watermark, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelPacket pixel; register PixelPacket *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse) { InheritException(exception,&stegano_image->exception); stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=image->offset; status=MagickTrue; watermark_view=AcquireCacheView(watermark); stegano_view=AcquireCacheView(stegano_image); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception); if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (PixelPacket *) NULL) break; switch (c) { case 0: { SetBit(q->red,j,GetBit(PixelIntensityToQuantum(&pixel),i)); break; } case 1: { SetBit(q->green,j,GetBit(PixelIntensityToQuantum(&pixel),i)); break; } case 2: { SetBit(q->blue,j,GetBit(PixelIntensityToQuantum(&pixel),i)); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (stegano_image->storage_class == PseudoClass) (void) SyncImage(stegano_image); if (status == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces 
a single image that
% is the composite of a left and right image of a stereo pair.  Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
%     Image *StereoImage(const Image *left_image,const Image *right_image,
%       ExceptionInfo *exception)
%     Image *StereoAnaglyphImage(const Image *left_image,
%       const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o left_image: the left image.
%
%    o right_image: the right image.
%
%    o exception: return any errors or warnings in this structure.
%
%    o x_offset: amount, in pixels, by which the left image is offset to the
%      right of the right image.
%
%    o y_offset: amount, in pixels, by which the left image is offset to the
%      bottom of the right image.
%
%
*/
/* Convenience wrapper: anaglyph with zero left-image offset. */
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* NOTE(review): this assert duplicates the right_image check above. */
  assert(right_image != (const Image *) NULL);
  image=left_image;
  /* Both images must have identical geometry for a per-pixel composite. */
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  /*
    Copy left image to red channel and right image to blue channel.
    The left image is read shifted by (-x_offset, -y_offset).
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *restrict p,
      *restrict q;

    register ssize_t
      x;

    register PixelPacket
      *restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
        (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left eye, green/blue from the right eye. */
      r->red=GetRedPixelComponent(p);
      r->green=q->green;
      r->blue=q->blue;
      r->opacity=(Quantum) ((p->opacity+q->opacity)/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *image_view,
    *swirl_view;

  Image
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor: scale compensates for a non-square aspect ratio
    so the swirl region is an ellipse inscribed in the image.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image: rows are processed in parallel; each destination pixel is
    sampled from the source at a rotated position.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireCacheView(image);
  swirl_view=AcquireCacheView(swirl_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    MagickRealType
      distance;

    PointInfo
      delta;

    register IndexPacket
      *restrict swirl_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse; pixels outside the
        swirl radius are left as cloned from the source.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          MagickRealType
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation angle falls off quadratically from
            `degrees` at the center to zero at the radius.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *opacity,
%        const PixelPacket tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    color_vector,
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* Without an opacity geometry there is nothing to do: return the clone. */
  if (opacity == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The opacity string is a geometry
    "r[xg[xb[xo]]]"; missing green/blue default to the red value.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    PixelIntensity(&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    PixelIntensity(&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    PixelIntensity(&tint));
  /*
    Tint image: rows are processed in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  tint_view=AcquireCacheView(tint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* NOTE: this `pixel` shadows the function-scope `pixel` above. */
      MagickPixelPacket
        pixel;

      MagickRealType
        weight;

      /* Midtone weighting f(x)=1-4*(x-0.5)^2 per channel. */
      weight=QuantumScale*p->red-0.5;
      pixel.red=(MagickRealType) p->red+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetRedPixelComponent(q,ClampRedPixelComponent(&pixel));
      weight=QuantumScale*p->green-0.5;
      pixel.green=(MagickRealType) p->green+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      SetGreenPixelComponent(q,ClampGreenPixelComponent(&pixel));
      weight=QuantumScale*p->blue-0.5;
      pixel.blue=(MagickRealType) p->blue+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetBluePixelComponent(q,ClampBluePixelComponent(&pixel));
      SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%
V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MaxTextExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas_image,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Working copy of the source with an alpha channel enabled. */
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas_image->exception);
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->matte=MagickTrue;
  /* Mask image: black background with a white filled ellipse. */
  oval_image=CloneImage(canvas_image,canvas_image->columns,
    canvas_image->rows,MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  (void) QueryColorDatabase("#000000",&oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  /* Ellipse centered in the image, shrunk by the (x,y) offsets. */
  (void) FormatMagickString(ellipse,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /* Blur the mask so the vignette edge is soft, then use it as opacity. */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  blur_image->matte=MagickFalse;
  (void) CompositeImage(canvas_image,CopyOpacityCompositeOp,blur_image,0,0);
  blur_image=DestroyImage(blur_image);
  /* Flatten against the background color to produce the final image. */
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length:  Define the amplitude and wave length of the
%      sine wave.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.  The destination is taller than the
    source by twice the amplitude so the displaced rows still fit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map: per-column vertical displacement, precomputed once.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: rows are processed in parallel; out-of-range source rows
    resolve to the background color via the virtual-pixel method.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireCacheView(image);
  wave_view=AcquireCacheView(wave_image);
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Sample the source shifted vertically by the column's sine value. */
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
convolution_1x1_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack8to4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to4, int inch, int outch) { // interleave // src = inch-outch // dst = 4b-8a-inch/8a-outch/4 kernel_tm_pack8to4.create(4 * 8, inch / 8, outch / 8 + (outch % 8) / 4, (size_t)2u * 2, 2); int p = 0; for (; p + 7 < outch; p += 8) { const float* k0 = (const float*)kernel + (p + 0) * inch; const float* k1 = (const float*)kernel + (p + 1) * inch; const float* k2 = (const float*)kernel + (p + 2) * inch; const float* k3 = (const float*)kernel + (p + 3) * inch; const float* k4 = (const float*)kernel + (p + 4) * inch; const float* k5 = (const float*)kernel + (p + 5) * inch; const float* k6 = (const float*)kernel + (p + 6) * inch; const float* k7 = (const float*)kernel + (p + 7) * inch; __fp16* g0 = kernel_tm_pack8to4.channel(p / 8); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0[4] = (__fp16)k4[i]; g0[5] = (__fp16)k5[i]; g0[6] = (__fp16)k6[i]; g0[7] = (__fp16)k7[i]; g0 += 8; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; k4 += 8; k5 += 8; k6 += 8; k7 += 8; } } for (; p + 3 < outch; p += 4) { const float* k0 = (const float*)kernel + (p + 0) * inch; const float* k1 = 
(const float*)kernel + (p + 1) * inch; const float* k2 = (const float*)kernel + (p + 2) * inch; const float* k3 = (const float*)kernel + (p + 3) * inch; __fp16* g0 = kernel_tm_pack8to4.channel(p / 8 + (p % 8) / 4); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0 += 4; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; } } } static void conv1x1s1_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const __fp16* bias = _bias; // interleave Mat tmp; if (size >= 8) tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); { int nn_size; int remain_size_start = 0; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, 
v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 8; } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = top_blob.channel(p + 1); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "mov v28.16b, %10.16b \n" "mov v29.16b, %10.16b \n" "mov v30.16b, %10.16b \n" "mov v31.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] 
\n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 \n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 
2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %10.16b \n" "mov v25.16b, %10.16b \n" "mov v26.16b, %10.16b \n" "mov v27.16b, %10.16b \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v24.8h, v17.8h, v0.h[4] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[6] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" "fmla v24.8h, v18.8h, v1.h[0] \n" "fmla v25.8h, v18.8h, v1.h[1] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[3] \n" "fmla v24.8h, v19.8h, v1.h[4] \n" "fmla v25.8h, v19.8h, v1.h[5] \n" "fmla v26.8h, v19.8h, v1.h[6] \n" "fmla v27.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v2.h[0] \n" "fmla v25.8h, v20.8h, v2.h[1] \n" "fmla v26.8h, v20.8h, v2.h[2] \n" "fmla v27.8h, v20.8h, v2.h[3] \n" "fmla v24.8h, v21.8h, v2.h[4] \n" "fmla v25.8h, v21.8h, v2.h[5] \n" "fmla v26.8h, v21.8h, v2.h[6] \n" "fmla v27.8h, v21.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v22.8h, v3.h[0] \n" "fmla v25.8h, v22.8h, v3.h[1] \n" "fmla v26.8h, v22.8h, v3.h[2] \n" "fmla v27.8h, v22.8h, v3.h[3] \n" "fmla v24.8h, v23.8h, v3.h[4] \n" "fmla v25.8h, v23.8h, v3.h[5] \n" "fmla v26.8h, v23.8h, v3.h[6] \n" "fmla v27.8h, v23.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr), "w"(_bias0) // %10 : "cc", "memory", 
"v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2); float16x8_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); float16x8_t _k4 = vld1q_f16(kptr + 32); float16x8_t _k5 = vld1q_f16(kptr + 40); float16x8_t _k6 = vld1q_f16(kptr + 48); float16x8_t _k7 = vld1q_f16(kptr + 56); _sum0 = vfmaq_laneq_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfmaq_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfmaq_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfmaq_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfmaq_laneq_f16(_sum0, _k7, _r0, 7); kptr += 64; tmpptr += 8; } vst1_f16(outptr0, vget_low_f16(_sum0)); vst1_f16(outptr1, vget_high_f16(_sum0)); outptr0 += 4; outptr1 += 4; } } remain_outch_start += nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[4] = {0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 4 : zeros; float16x4_t _bias0 = vld1_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "mov v28.16b, %8.16b \n" "mov v29.16b, %8.16b \n" "mov v30.16b, %8.16b \n" "mov v31.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, 
v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); int nn = inch; // inch always > 0 asm volatile( "mov v24.16b, %8.16b \n" "mov v25.16b, %8.16b \n" "mov v26.16b, %8.16b \n" "mov v27.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla 
v27.4h, v16.4h, v0.h[3] \n" "fmla v24.4h, v17.4h, v0.h[4] \n" "fmla v25.4h, v17.4h, v0.h[5] \n" "fmla v26.4h, v17.4h, v0.h[6] \n" "fmla v27.4h, v17.4h, v0.h[7] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" "fmla v24.4h, v18.4h, v1.h[0] \n" "fmla v25.4h, v18.4h, v1.h[1] \n" "fmla v26.4h, v18.4h, v1.h[2] \n" "fmla v27.4h, v18.4h, v1.h[3] \n" "fmla v24.4h, v19.4h, v1.h[4] \n" "fmla v25.4h, v19.4h, v1.h[5] \n" "fmla v26.4h, v19.4h, v1.h[6] \n" "fmla v27.4h, v19.4h, v1.h[7] \n" "fmla v24.4h, v20.4h, v2.h[0] \n" "fmla v25.4h, v20.4h, v2.h[1] \n" "fmla v26.4h, v20.4h, v2.h[2] \n" "fmla v27.4h, v20.4h, v2.h[3] \n" "fmla v24.4h, v21.4h, v2.h[4] \n" "fmla v25.4h, v21.4h, v2.h[5] \n" "fmla v26.4h, v21.4h, v2.h[6] \n" "fmla v27.4h, v21.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v22.4h, v3.h[0] \n" "fmla v25.4h, v22.4h, v3.h[1] \n" "fmla v26.4h, v22.4h, v3.h[2] \n" "fmla v27.4h, v22.4h, v3.h[3] \n" "fmla v24.4h, v23.4h, v3.h[4] \n" "fmla v25.4h, v23.4h, v3.h[5] \n" "fmla v26.4h, v23.4h, v3.h[6] \n" "fmla v27.4h, v23.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 2 + p % 2); float16x4_t _sum0 = _bias0; for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); float16x4_t _k4 = vld1_f16(kptr + 16); float16x4_t _k5 = vld1_f16(kptr + 20); float16x4_t _k6 = vld1_f16(kptr + 24); float16x4_t _k7 = vld1_f16(kptr + 28); _sum0 = vfma_laneq_f16(_sum0, _k0, 
_r0, 0); _sum0 = vfma_laneq_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_laneq_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_laneq_f16(_sum0, _k3, _r0, 3); _sum0 = vfma_laneq_f16(_sum0, _k4, _r0, 4); _sum0 = vfma_laneq_f16(_sum0, _k5, _r0, 5); _sum0 = vfma_laneq_f16(_sum0, _k6, _r0, 6); _sum0 = vfma_laneq_f16(_sum0, _k7, _r0, 7); kptr += 32; tmpptr += 8; } vst1_f16(outptr0, _sum0); outptr0 += 4; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? bias[p] : 0.f; // // __fp16* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const __fp16* kptr = _kernel.channel(p); // // for (int q=0; q<inch; q++) // { // const __fp16* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s2_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 + 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, 
_v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
DelayedUpdate.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_DELAYED_UPDATE_H
#define QMCPLUSPLUS_DELAYED_UPDATE_H

#include <OhmmsPETE/OhmmsVector.h>
#include <OhmmsPETE/OhmmsMatrix.h>
#include <CPU/BLAS.hpp>
#include <CPU/BlasThreadingEnv.h>
#include "QMCWaveFunctions/Fermion/DiracMatrix.h"
#include "config.h"

namespace qmcplusplus
{
/** implements delayed update on CPU using BLAS
 * @tparam T base precision for most computation
 * @tparam T_FP high precision for matrix inversion, T_FP >= T
 *
 * Instead of applying a rank-1 Sherman-Morrison update to Ainv after every
 * accepted move, up to `delay` accepted rows are accumulated in U/V/Binv and
 * applied to Ainv in one batch of GEMMs (rank-K update).  Up-to-date rows of
 * Ainv can still be reconstructed on the fly via getInvRow().
 */
template<typename T, typename T_FP>
class DelayedUpdate
{
  /// orbital values of delayed electrons; row k holds psiV of the k-th accepted move (K x N)
  Matrix<T> U;
  /// rows of Ainv corresponding to delayed electrons; row k is a saved row of Ainv (K x N)
  Matrix<T> V;
  /// Matrix inverse of B, at maximum KxK
  Matrix<T> Binv;
  /// scratch space, used during inverse update
  Matrix<T> tempMat;
  /// temporal scratch space used by SM-1
  Vector<T> temp;
  /// new column of B
  Vector<T> p;
  /// list of delayed electrons; delay_list[k] is the Ainv row index of the k-th accepted move
  std::vector<int> delay_list;
  /// current number of delays, increase one for each acceptance, reset to 0 after updating Ainv
  int delay_count;
  /// matrix inversion engine
  DiracMatrix<T_FP> detEng;

public:
  /// default constructor; starts with no pending delayed updates
  DelayedUpdate() : delay_count(0) {}

  /** resize the internal storage
   * @param norb number of electrons/orbitals
   * @param delay, maximum delay 0<delay<=norb
   */
  inline void resize(int norb, int delay)
  {
    V.resize(delay, norb);
    U.resize(delay, norb);
    p.resize(delay);
    temp.resize(norb);
    tempMat.resize(norb, delay);
    Binv.resize(delay, delay);
    delay_list.resize(delay);
  }

  /** compute the inverse of the transpose of matrix A
   * @param logdetT orbital value matrix
   * @param Ainv inverse matrix
   * @param LogValue log of the determinant, accumulated by the inversion engine
   */
  template<typename TREAL>
  inline void invert_transpose(const Matrix<T>& logdetT, Matrix<T>& Ainv, std::complex<TREAL>& LogValue)
  {
    detEng.invert_transpose(logdetT, Ainv, LogValue);
    // safe mechanism: a fresh Ainv invalidates any pending delayed updates
    delay_count = 0;
  }

  /** initialize internal objects when Ainv is refreshed
   * @param Ainv inverse matrix (unused here; only the delay state is reset)
   */
  inline void initializeInv(const Matrix<T>& Ainv)
  {
    // safe mechanism: a refreshed Ainv invalidates any pending delayed updates
    delay_count = 0;
  }

  /// return the number of accepted moves not yet folded into Ainv
  inline int getDelayCount() const { return delay_count; }

  /** compute the row of up-to-date Ainv
   * @param Ainv inverse matrix
   * @param rowchanged the row id corresponding to the proposed electron
   * @param invRow output; the row as it would appear after applying pending updates
   */
  template<typename VVT>
  inline void getInvRow(const Matrix<T>& Ainv, int rowchanged, VVT& invRow)
  {
    if (delay_count == 0)
    {
      // Ainv is fresh, directly access Ainv
      std::copy_n(Ainv[rowchanged], invRow.size(), invRow.data());
      return;
    }
    const T cone(1);
    const T czero(0);
    const int norb     = Ainv.rows();
    const int lda_Binv = Binv.cols();
    // save Ainv[rowchanged] to invRow
    std::copy_n(Ainv[rowchanged], norb, invRow.data());
    // multiply V (NxK) Binv(KxK) U(KxN) invRow right to the left
    // row delay_count of Binv (currently unused, since delay_count < lda_Binv)
    // serves as scratch for the intermediate Binv*p product.
    BLAS::gemv('T', norb, delay_count, cone, U.data(), norb, invRow.data(), 1, czero, p.data(), 1);
    BLAS::gemv('N', delay_count, delay_count, cone, Binv.data(), lda_Binv, p.data(), 1, czero, Binv[delay_count], 1);
    BLAS::gemv('N', norb, delay_count, -cone, V.data(), norb, Binv[delay_count], 1, cone, invRow.data(), 1);
  }

  /** accept a move with the update delayed
   * @param Ainv inverse matrix
   * @param rowchanged the row id corresponding to the proposed electron
   * @param psiV new orbital values
   *
   * Before delay_count reaches the maximum delay, only Binv is updated with a recursive algorithm
   */
  template<typename VVT>
  inline void acceptRow(Matrix<T>& Ainv, int rowchanged, const VVT& psiV)
  {
    const T cminusone(-1);
    const T czero(0);
    const int norb     = Ainv.rows();
    const int lda_Binv = Binv.cols();
    // record the saved Ainv row, the new orbital values, and the row index
    std::copy_n(Ainv[rowchanged], norb, V[delay_count]);
    std::copy_n(psiV.data(), norb, U[delay_count]);
    delay_list[delay_count] = rowchanged;
    // the new Binv is [[X Y] [Z x]]
    BLAS::gemv('T', norb, delay_count + 1, cminusone, V.data(), norb, psiV.data(), 1, czero, p.data(), 1);
    // x
    T y = -p[delay_count];
    for (int i = 0; i < delay_count; i++)
      y += Binv[delay_count][i] * p[i];
    Binv[delay_count][delay_count] = y = T(1) / y;
    // Y
    BLAS::gemv('T', delay_count, delay_count, y, Binv.data(), lda_Binv, p.data(), 1, czero, Binv.data() + delay_count, lda_Binv);
    // X  (rank-1 correction of the existing KxK block; must run before Z scales the new row)
    BLAS::ger(delay_count, delay_count, cminusone, Binv[delay_count], 1, Binv.data() + delay_count, lda_Binv, Binv.data(), lda_Binv);
    // Z
    for (int i = 0; i < delay_count; i++)
      Binv[delay_count][i] *= -y;
    delay_count++;
    // update Ainv when maximal delay is reached
    if (delay_count == lda_Binv)
      updateInvMat(Ainv);
  }

  /** update the full Ainv and reset delay_count
   * @param Ainv inverse matrix
   */
  inline void updateInvMat(Matrix<T>& Ainv)
  {
    if (delay_count == 0)
      return;
    // update the inverse matrix
    const T cone(1);
    const T czero(0);
    const int norb = Ainv.rows();
    if (delay_count == 1)
    {
      // this is a special case invoking the Fahy's variant of Sherman-Morrison update.
      // Only use the first norb elements of tempMat as a temporal array
      BLAS::gemv('T', norb, norb, cone, Ainv.data(), norb, U[0], 1, czero, temp.data(), 1);
      temp[delay_list[0]] -= cone;
      BLAS::ger(norb, norb, -Binv[0][0], V[0], 1, temp.data(), 1, Ainv.data(), norb);
    }
    else
    {
      const int lda_Binv = Binv.cols();
      // number of threads at the next level, forced to 1 if the problem is small.
      const int num_threads = (norb < 256 ? 1 : getNextLevelNumThreads());
      if (num_threads == 1 || BlasThreadingEnv::NestedThreadingSupported())
      {
        // threading depends on BLAS
        BlasThreadingEnv knob(num_threads);
        // tempMat = U * Ainv^T - I (restricted to the delayed rows), then
        // Ainv -= (V * Binv) * tempMat; U is reused to hold V * Binv.
        BLAS::gemm('T', 'N', delay_count, norb, norb, cone, U.data(), norb, Ainv.data(), norb, czero, tempMat.data(), lda_Binv);
        for (int i = 0; i < delay_count; i++)
          tempMat(delay_list[i], i) -= cone;
        BLAS::gemm('N', 'N', norb, delay_count, delay_count, cone, V.data(), norb, Binv.data(), lda_Binv, czero, U.data(), norb);
        BLAS::gemm('N', 'N', norb, norb, delay_count, -cone, U.data(), norb, tempMat.data(), lda_Binv, cone, Ainv.data(), norb);
      }
      else
      {
        // manually threaded version of the above GEMM calls
#pragma omp parallel
        {
          const int block_size = getAlignedSize<T>((norb + num_threads - 1) / num_threads);
          int num_block        = (norb + block_size - 1) / block_size;
#pragma omp for
          for (int ix = 0; ix < num_block; ix++)
          {
            int x_offset = ix * block_size;
            BLAS::gemm('T', 'N', delay_count, std::min(norb - x_offset, block_size), norb, cone, U.data(), norb,
                       Ainv[x_offset], norb, czero, tempMat[x_offset], lda_Binv);
          }
          // single-thread correction of the diagonal entries; the implicit
          // barrier of the preceding "omp for" orders it after the first GEMM
#pragma omp master
          for (int i = 0; i < delay_count; i++)
            tempMat(delay_list[i], i) -= cone;
#pragma omp for
          for (int iy = 0; iy < num_block; iy++)
          {
            int y_offset = iy * block_size;
            BLAS::gemm('N', 'N', std::min(norb - y_offset, block_size), delay_count, delay_count, cone,
                       V.data() + y_offset, norb, Binv.data(), lda_Binv, czero, U.data() + y_offset, norb);
          }
#pragma omp for collapse(2) nowait
          for (int iy = 0; iy < num_block; iy++)
            for (int ix = 0; ix < num_block; ix++)
            {
              int x_offset = ix * block_size;
              int y_offset = iy * block_size;
              BLAS::gemm('N', 'N', std::min(norb - y_offset, block_size), std::min(norb - x_offset, block_size),
                         delay_count, -cone, U.data() + y_offset, norb, tempMat[x_offset], lda_Binv, cone,
                         Ainv[x_offset] + y_offset, norb);
            }
        }
      }
    }
    delay_count = 0;
  }
};
} // namespace qmcplusplus

#endif // QMCPLUSPLUS_DELAYED_UPDATE_H
simd1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* { dg-additional-options "-std=c99" { target c } } */

/* Compile-only torture test for the OpenMP "simd" construct: exercises the
   safelen, aligned, and linear clauses.  The bodies are intentionally
   trivial; only the pragma parsing/diagnostics are under test, so the code
   must not be restructured.  */

extern int a[1024], b[1024], k, l, m;

/* aligned clause on two file-scope arrays.  */
void foo () { int i; #pragma omp simd safelen(16) aligned(a, b : 32) for (i = 0; i < 1024; i++) a[i] *= b[i]; } /* aligned on an array and a pointer parameter; linear with a runtime step (m + 1). */ void bar (int *p) { int i; #pragma omp simd safelen(16) aligned(a, p : 32) linear(k, l : m + 1) for (i = 0; i < 1024; i++) a[i] *= p[i], k += m + 1; } /* same as bar, but with a C99 for-loop declaration of the iteration variable. */ void baz (int *p) { #pragma omp simd safelen(16) aligned(a, p : 32) linear(k, l : m + 1) for (int i = 0; i < 1024; i++) a[i] *= p[i], k += m + 1; }
simde-diagnostic.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> */ /* SIMDe targets a very wide range of standards and compilers, and our * goal is to compile cleanly even with extremely aggressive warnings * (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.) * treated as errors. * * While our preference is to resolve the underlying issue a given * diagnostic is warning us about, sometimes that's not possible. * Fixing a warning in one compiler may cause problems in another. * Sometimes a warning doesn't really apply to us (false positives), * and sometimes adhering to a warning would mean dropping a feature * we *know* the compiler supports since we have tested specifically * for the compiler or feature. * * When practical, warnings are only disabled for specific code. 
For
 * a list of warnings which are enabled by default in all SIMDe code,
 * see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS.  Note that we restore the
 * warning stack when SIMDe is done parsing, so code which includes
 * SIMDe is not deprived of these warnings. */

#if !defined(SIMDE_DIAGNOSTIC_H)
#define SIMDE_DIAGNOSTIC_H

#include "hedley.h"

/* This is only to help us implement functions like _mm_undefined_ps. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
  #undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
/* One branch per compiler family; note there is deliberately no empty
 * fallback #else here, so use of this macro must be guarded by the
 * caller when the compiler is unknown. */
#if HEDLEY_HAS_WARNING("-Wuninitialized")
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,2,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_PGI_VERSION_CHECK(19,10,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)")
#elif \
    HEDLEY_TI_VERSION_CHECK(16,9,9) || \
    HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
    HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
    HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ __pragma(warning(disable:4700))
#endif

/* GCC emits a lot of "notes" about the ABI being different for things
 * in newer versions of GCC.  We don't really care because all our
 * functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif

/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
 * after each MMX function before any floating point instructions.
 * Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty().  However, since SIMDe is implementing the
 * MMX API we shouldn't be calling _mm_empty(); we leave it to the
 * caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif

/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
 * emit a diagnostic if you use #pragma simd instead of
 * #pragma omp simd.  SIMDe supports OpenMP SIMD, you just need to
 * compile with -qopenmp or -qopenmp-simd and define
 * SIMDE_ENABLE_OPENMP.  Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif

/* MSVC emits a diagnostic when we call a function (like
 * simde_mm_set_epi32) while initializing a struct.  We currently do
 * this a *lot* in the tests. */
#if \
  defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif

/* This warning needs a lot of work.  It is triggered if all you do is
 * pass the value to memcpy/__builtin_memcpy, or if you initialize a
 * member of the union, even if that member takes up the entire union.
 * Last tested with clang-10, hopefully things will improve in the
 * future; if clang fixes this I'd love to enable it. */
#if \
  HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif

/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * is false.  However, SIMDe uses these operations exclusively
 * for things like _mm_cmpeq_ps, for which we really do want to check
 * for equality (or inequality).
 *
 * If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code do disable this diagnostic I'd
 * be happy to accept it. */
#if \
  HEDLEY_HAS_WARNING("-Wfloat-equal") || \
  HEDLEY_GCC_VERSION_CHECK(3,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif

/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
 * If Hedley can't find an implementation it will preprocess to
 * nothing, which means there will be a trailing semi-colon. */
#if HEDLEY_HAS_WARNING("-Wextra-semi")
  #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"")
#elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_
#endif

/* We do use a few variadic macros, which technically aren't available
 * until C99 and C++11, but every compiler I'm aware of has supported
 * them for much longer.  That said, usage is isolated to the test
 * suite and compilers known to support them.
*/ #if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0) #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \ _Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"") #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ #endif /* emscripten requires us to use a __wasm_unimplemented_simd128__ macro * before we can access certain SIMD intrinsics, but this diagnostic * warns about it being a reserved name. It is a reserved name, but * it's reserved for the compiler and we are using it to convey * information to the compiler. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #endif /* clang 3.8 warns about the packed attribute being unnecessary when * used in the _mm_loadu_* functions. That *may* be true for version * 3.8, but for later versions it is crucial in order to make unaligned * access safe. */ #if HEDLEY_HAS_WARNING("-Wpacked") #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ #endif /* Triggered when assigning a float to a double implicitly. We use * explicit casts in SIMDe, this is only used in the test suite. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ #endif /* Several compilers treat conformant array parameters as VLAs. We * test to make sure we're in C mode (C++ doesn't support CAPs), and * that the version of the standard supports CAPs. 
We also reject
 * some buggy compilers like MSVC (the logic is in Hedley if you want
 * to take a look), but with certain warnings enabled some compilers
 * still like to emit a diagnostic. */
#if HEDLEY_HAS_WARNING("-Wvla")
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_
#endif

/* Functions marked with attributes (e.g. always_inline) that clang
 * considers "marked unused" trigger this when actually used. */
#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif

/* Static helper functions that a translation unit never calls. */
#if HEDLEY_HAS_WARNING("-Wunused-function")
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("clang diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_GCC_VERSION_CHECK(3,4,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ __pragma(warning(disable:4505))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_
#endif

/* clang's loop-transformation diagnostics (e.g. vectorization hints
 * that could not be honored). */
#if HEDLEY_HAS_WARNING("-Wpass-failed")
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif

/* Struct padding introduced by alignment requirements. */
#if HEDLEY_HAS_WARNING("-Wpadded")
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif

#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif

#if HEDLEY_HAS_WARNING("-Wold-style-cast")
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif

/* The GCC spelling of this pragma is accepted by clang as well. */
#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif

#if HEDLEY_HAS_WARNING("-Wc99-extensions")
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif

/* https://github.com/simd-everywhere/simde/issues/277 */
/* NOTE: the "VARIBALE" misspelling below is part of the public macro
 * name; renaming it here would break external users, so it is kept. */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,0,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif

/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
 * to silence, but you have to do that before including anything and
 * that would require reordering includes. */
#if defined(_MSC_VER)
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif

/* Some compilers, such as clang, may use `long long` for 64-bit
 * integers, but `long long` triggers a diagnostic with
 * -Wc++98-compat-pedantic which says 'long long' is incompatible with
 * C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif

/* emscripten emits this whenever stdin/stdout/stderr is used in a
 * macro. */
#if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion")
  #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_
#endif

/* Aggregate of the diagnostics disabled throughout all SIMDe code;
 * pushed at the start of SIMDe headers and popped at the end. */
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \
  SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \
  SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \
  SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \
  SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \
  SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \
  SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \
  SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \
  SIMDE_DIAGNOSTIC_DISABLE_VLA_ \
  SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
  SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ \
  SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \
  SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
  SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_

#endif /* !defined(SIMDE_DIAGNOSTIC_H) */
mvt_teams.c
/** * mvt.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define N 4096 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2, DATA_TYPE* x1_gpu, DATA_TYPE* x2_gpu) { int i, j; for (i = 0; i < N; i++) { x1[i] = ((DATA_TYPE) i) / N; x2[i] = ((DATA_TYPE) i + 1) / N; x1_gpu[i] = x1[i]; x2_gpu[i] = x2[i]; y1[i] = ((DATA_TYPE) i + 3) / N; y2[i] = ((DATA_TYPE) i + 4) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2) { int i, j; for (i=0; i<N; i++) { for (j=0; j<N; j++) { x1[i] = x1[i] + a[i*N + j] * y1[j]; } } for (i=0; i<N; i++) { for (j=0; j<N; j++) { x2[i] = x2[i] + a[j*N + i] * y2[j]; } } } void runMvt_OMP(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2) { int i; #pragma omp target map(to: a[:N*N], y1[:N], y2[:N]) map(tofrom: x1[:N], x2[:N]) { #pragma omp teams { #pragma omp distribute parallel for for (i=0; i<N; i++) { int j; for (j=0; j<N; j++) { x1[i] = x1[i] + a[i*N + j] * y1[j]; } } #pragma omp distribute parallel for for (i=0; i<N; i++) { int j; for (j=0; j<N; j++) { x2[i] = x2[i] + a[j*N + i] * y2[j]; } } } } } void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* 
x2_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<N; i++) { if (percentDiff(x1[i], x1_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } if (percentDiff(x2[i], x2_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main() { double t_start, t_end; DATA_TYPE* a; DATA_TYPE* x1; DATA_TYPE* x2; DATA_TYPE* x1_outputFromGpu; DATA_TYPE* x2_outputFromGpu; DATA_TYPE* y_1; DATA_TYPE* y_2; a = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); x1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); x2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); x1_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); x2_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix Vector Product and Transpose >>\n"); init_array(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu); t_start = rtclock(); runMvt_OMP(a, x1_outputFromGpu, x2_outputFromGpu, y_1, y_2); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); //run the algorithm on the CPU runMvt(a, x1, x2, y_1, y_2); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu); free(a); free(x1); free(x2); free(x1_outputFromGpu); free(x2_outputFromGpu); free(y_1); free(y_2); return 0; }
Example_copyprivate.2.c
/*
 * @@name:       copyprivate.2c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 */
#include <stdio.h>
#include <stdlib.h>

/*
 * Reads one float from stdin inside a parallel region and returns it
 * to EVERY thread of the team.  Intended to be called from within an
 * active parallel region (the single/master/barrier constructs below
 * assume a full team is executing this function).
 *
 * Protocol:
 *   1. one thread allocates the shared buffer; copyprivate broadcasts
 *      the pointer value to the other threads' private `tmp`,
 *   2. the master thread fills the buffer via scanf,
 *   3. a barrier orders the master's write before everyone's read,
 *   4. a second barrier orders all reads before the buffer is freed.
 */
float read_next( ) {
  float * tmp;
  float return_val;

  #pragma omp single copyprivate(tmp)
  {
    tmp = (float *) malloc(sizeof(float));
  } /* copies the pointer only */
  /* NOTE(review): malloc and scanf results are unchecked here; this is
   * spec-example code — confirm before reusing in production. */

  /* `master` has no implied barrier, hence the explicit one below. */
  #pragma omp master
  {
    scanf("%f", tmp);
  }

  /* Ensure the value written by the master is visible to all threads
   * before any thread dereferences tmp. */
  #pragma omp barrier
  return_val = *tmp;
  /* Ensure every thread has read *tmp before one thread frees it. */
  #pragma omp barrier

  #pragma omp single nowait
  {
    free(tmp);
  }

  return return_val;
}
arithmetic.h
/* Copyright 2015 The math21 Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #pragma once #include <limits> #include "inner.h" namespace math21 { ///////////////// template<typename VecType> NumB math21_operator_container_isfinite(const VecType &A) { MATH21_ASSERT(!A.isEmpty(), "empty matrix"); NumN i; NumN n = A.size(); for (i = 1; i <= n; ++i) { if (!xjisfinite(A(i))) { return 0; } } return 1; } // T is basic type template<typename T> NumB math21_operator_isfinite(const T &x) { return xjisfinite(x); } // set MATH21_MAX to x with value infinite ignoring sign. template<typename T> void math21_operator_clip_pos_inf(T &x) { if (xjisinf(x)) { x = MATH21_MAX; } } // set MATH21_MIN to x with value infinite ignoring sign. template<typename T> void math21_operator_clip_neg_inf(T &x) { if (xjisinf(x)) { x = MATH21_MIN; } } // set MATH21_MAX to x with value infinite ignoring sign. template<typename VecType> void math21_operator_container_clip_pos_inf(VecType &A) { MATH21_ASSERT(!A.isEmpty(), "empty matrix"); NumN i; NumN n = A.size(); for (i = 1; i <= n; ++i) { if (xjisinf(A(i))) { A(i) = MATH21_MAX; } } } // set MATH21_MIN to x with value infinite ignoring sign. 
template<typename VecType>
    void math21_operator_container_clip_neg_inf(VecType &A) {
        MATH21_ASSERT(!A.isEmpty(), "empty matrix");
        NumN i;
        NumN n = A.size();
        for (i = 1; i <= n; ++i) {
            if (xjisinf(A(i))) {
                A(i) = MATH21_MIN;
            }
        }
    }

    // Raise x to MATH21_EPS if it is below that floor.
    template<typename T>
    void math21_operator_clip_not_less_than_eps(T &x) {
        if (x < MATH21_EPS) {
            x = MATH21_EPS;
        }
    }

    // Container version: floor every element at MATH21_EPS.
    template<typename VecType>
    void math21_operator_container_clip_not_less_than_eps(VecType &A) {
        MATH21_ASSERT(!A.isEmpty(), "empty matrix");
        NumN i;
        NumN n = A.size();
        for (i = 1; i <= n; ++i) {
            if (A(i) < MATH21_EPS) {
                A(i) = MATH21_EPS;
            }
        }
    }

    // set MATH21_MAX to x with value infinite ignoring sign.
    // Additionally maps exact zeros to MATH21_EPS (used e.g. after exp()).
    template<typename VecType>
    void math21_operator_container_clip_zero_and_pos_inf(VecType &A) {
        MATH21_ASSERT(!A.isEmpty(), "empty matrix");
        NumN i;
        NumN n = A.size();
        for (i = 1; i <= n; ++i) {
            if (xjisinf(A(i))) {
                A(i) = MATH21_MAX;
            } else if (A(i) == 0) {
                A(i) = MATH21_EPS;
            }
        }
    }

    // a <= A(i) <= b
    // Clamp every element of A into the closed interval [a, b].
    template<typename VecType>
    void math21_operator_container_clip_in(VecType &A, NumR a, NumR b) {
        MATH21_ASSERT(!A.isEmpty(), "empty matrix");
        NumN i;
        NumN n = A.size();
        //#pragma omp parallel for
        for (i = 1; i <= n; ++i) {
            if (A(i) > b) {
                A(i) = b;
            } else if (A(i) < a) {
                A(i) = a;
            }
        }
    }

    //////////////////////

    /* RULE: all container ops can not call set size method.
* * */ template<typename T, template<typename> class Container> NumB math21_clip_container(Container<T> &A) { NumB clipped = 0; NumN i, n; n = A.size(); for (i = 1; i <= n; ++i) { T &val = A(i); if (val > MATH21_MAX) { val = MATH21_MAX; if (clipped == 0) { clipped = 1; } } else if (val < MATH21_MIN) { val = MATH21_MIN; if (clipped == 0) { clipped = 1; } } } return clipped; } template<typename T, template<typename> class Container> NumB math21_check_clip_container(const Container<T> &A) { NumB clipped = 0; NumN i, n; n = A.size(); for (i = 1; i <= n; ++i) { const T &val = A(i); if (val > MATH21_MAX) { clipped = 1; return clipped; } else if (val < MATH21_MIN) { clipped = 1; return clipped; } } return clipped; } //////////////////// // see math21_point_isEqual template<typename T, typename S> NumB math21_operator_num_isEqual(T x, S y, NumR epsilon = 0) { NumR tmp; tmp = y - x; if (xjabs(tmp) > epsilon) { return 0; } return 1; } template<typename T, typename S> NumB math21_operator_num_isLarger(T x, S y, NumR epsilon = 0) { NumR tmp; tmp = x - y; if (tmp > epsilon) { return 0; } return 1; } template<typename VecType> NumN math21_operator_container_number_of_equal(NumR k, const VecType &x, NumN offset = 0) { NumN n = x.size(); MATH21_ASSERT(n >= 1); NumN sum = 0; for (NumN i = offset + 1; i <= n; ++i) { if (k == x(i)) { sum = sum + 1; } } return sum; } template<typename VecType1, typename VecType2> NumN math21_operator_container_count_element_equal_at_same_pos(const VecType1 &x, const VecType2 &y) { NumN n = xjmin(x.size(), y.size()); NumN count = 0; for (NumN i = 1; i <= n; ++i) { if (x(i) == y(i)) { count = count + 1; } } return count; } // indexes.size <= xjmin(x.size(), y.size()); template<typename VecType1, typename VecType2, typename VecType3> void _math21_operator_container_arg_equal_or_not( const VecType1 &x, const VecType2 &y, VecType3 &indexes, NumB isEqual, NumB isCheckingCommonOnly = 1) { NumN n_equal = 
math21_operator_container_count_element_equal_at_same_pos(x, y);
        NumN n = xjmin(x.size(), y.size());
        NumN n_max = xjmax(x.size(), y.size());
        // Pre-size `indexes` for exactly the number of hits; clear and
        // bail out early when there is nothing to report.
        if (isEqual) {
            if (n_equal == 0) {
                indexes.clear();
                return;
            }
            indexes.setSize(n_equal);
        } else {
            NumN n_not_equal;
            if (isCheckingCommonOnly) {
                n_not_equal = n - n_equal;
            } else {
                // Positions beyond the shorter container also count as
                // "not equal" in this mode.
                n_not_equal = n_max - n_equal;
            }
            if (n_not_equal == 0) {
                indexes.clear();
                return;
            }
            indexes.setSize(n_not_equal);
        }
        NumN j = 1;
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) == y(i)) {
                if (isEqual == 1) {
                    indexes(j) = i;
                    ++j;
                }
            } else {
                if (!isEqual) {
                    indexes(j) = i;
                    ++j;
                }
            }
        }
        if (!isEqual && !isCheckingCommonOnly) {
            for (NumN i = n + 1; i <= n_max; ++i) {
                indexes(j) = i;
                ++j;
            }
        }
    }

    // Collect indexes i with x(i) == y(i).
    template<typename VecType1, typename VecType2, typename VecType3>
    void math21_operator_container_argequal(
            const VecType1 &x, const VecType2 &y, VecType3 &indexes) {
        _math21_operator_container_arg_equal_or_not(x, y, indexes, 1);
    }

    // Collect indexes i with x(i) != y(i).
    template<typename VecType1, typename VecType2, typename VecType3>
    void math21_operator_container_arg_not_equal(
            const VecType1 &x, const VecType2 &y, VecType3 &indexes,
            NumB isCheckingCommonOnly = 1) {
        _math21_operator_container_arg_equal_or_not(x, y, indexes, 0, isCheckingCommonOnly);
    }

    // Returns 1 iff every element of x is strictly above epsilon.
    template<typename VecType>
    NumB math21_operator_container_is_positive(const VecType &x, NumR epsilon = MATH21_MIN_POSITIVE_NUMR) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) <= epsilon) {
                return 0;
            }
        }
        return 1;
    }

    // Index of the first non-positive element, or 0 when all positive.
    template<typename VecType>
    NumN math21_operator_container_not_positive_first(const VecType &x, NumR epsilon = MATH21_MIN_POSITIVE_NUMR) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) <= epsilon) {
                return i;
            }
        }
        return 0;
    }

    // Debug wrapper: locates the offending element (print disabled).
    template<typename VecType>
    NumB math21_operator_container_is_positive_debug(const VecType &x) {
        if (!math21_operator_container_is_positive(x)) {
            NumN i = math21_operator_container_not_positive_first(x);
//            std::cout << "i = " << i
//                      << "\nx(i) = " << x(i);
            return 0;
        }
        return 1;
    }

    // Returns 1 iff every element of x is strictly greater than k.
    template<typename VecType>
    NumB math21_operator_container_is_larger_number(const VecType &x, NumR k) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) <= k) {
                return 0;
            }
        }
        return 1;
    }

    // xi, yi can not compare directly.
    // Note: comparing VecN and VecZ
    // x(i)>y(i), for any i
    // Sign-aware comparison so that mixing unsigned and signed element
    // types does not promote negatives to huge unsigned values.
    template<typename VecType, typename VecType2>
    NumB math21_operator_container_is_larger(const VecType &x, const VecType2 &y) {
        MATH21_ASSERT(x.size() == y.size())
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) >= 0 && y(i) >= 0) {
                if (x(i) <= y(i)) {
                    return 0;
                }
            } else if (x(i) < 0 && y(i) < 0) {
                if (x(i) <= y(i)) {
                    return 0;
                }
            } else if (x(i) < 0) {
                return 0;
            }
        }
        return 1;
    }

    // xi, yi can not compare directly.
    // Note: comparing VecN and VecZ
    // x(i)>=y(i), for any i
    template<typename VecType, typename VecType2>
    NumB math21_operator_container_is_not_less(const VecType &x, const VecType2 &y) {
        MATH21_ASSERT(x.size() == y.size())
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) >= 0 && y(i) >= 0) {
                if (x(i) < y(i)) {
                    return 0;
                }
            } else if (x(i) < 0 && y(i) < 0) {
                if (x(i) < y(i)) {
                    return 0;
                }
            } else if (x(i) < 0) {
                return 0;
            }
        }
        return 1;
    }

    // Returns 1 iff every element of x is strictly less than k.
    template<typename VecType>
    NumB math21_operator_container_is_less(const VecType &x, NumR k) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) >= k) {
                return 0;
            }
        }
        return 1;
    }

    // Returns 1 iff every element of x is <= k.
    template<typename VecType>
    NumB math21_operator_container_is_not_larger(const VecType &x, NumR k) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
//        NumN sum = 0;
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) > k) {
                return 0;
            }
        }
        return 1;
    }

    // Returns 1 iff every element of x is >= k.
    template<typename VecType>
    NumB math21_operator_container_is_not_less(const VecType &x, NumR k) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) < k) {
                return 0;
            }
        }
        return 1;
    }

    ////////////////////

    // y(i) = k + x(i); y must already be sized like x.
    template<typename S, typename VecType, template<typename>
            class Container>
    void math21_operator_container_add(NumR k, const VecType &x, Container<S> &y) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = (S) (k + x(i));
        }
        MATH21_ASSERT_FINITE(math21_operator_container_isfinite(y))
    }

    // In-place: x(i) = k + x(i).
    template<typename S, template<typename> class Container>
    void math21_operator_container_addTo(NumR k, Container<S> &x) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            x(i) = (S) (k + x(i));
        }
    }

    // y(i) = k / x(i); asserts |x(i)| above machine epsilon of T.
    template<typename T, typename S, template<typename> class Container, template<typename> class Container2>
    void math21_operator_container_divide(NumR k, const Container<T> &x, Container2<S> &y) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        T EPS = std::numeric_limits<T>::epsilon();
        for (NumN i = 1; i <= n; ++i) {
            MATH21_ASSERT(xjabs(x(i)) > EPS, "divide zero!" << x(i));
            y(i) = (S) (k / (x(i)));
        }
    }

    // In-place: x(i) = k / x(i); asserts x(i) > MATH21_EPS (note: this
    // variant checks the signed value, not the absolute value).
    template<typename S, template<typename> class Container>
    void math21_operator_container_divide_to(NumR k, Container<S> &x) {
        NumN n = x.size();
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            MATH21_ASSERT(x(i) > MATH21_EPS, "divide zero!" << x(i));
            x(i) = (S) (k / (x(i)));
        }
    }

    // y(i) = sqrt(x(i)).
    template<typename S, typename VecType, template<typename> class Container>
    void math21_operator_container_sqrt(const VecType &x, Container<S> &y) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = (S) (xjsqrt(x(i)));
        }
    }

    // In-place sqrt; asserts all elements positive first.
    template<typename S, template<typename> class Container>
    void math21_operator_container_sqrt_to(Container<S> &x) {
        MATH21_ASSERT_POSITIVE(math21_operator_container_is_positive_debug(x))
        NumN n = x.size();
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            x(i) = (S) (xjsqrt(x(i)));
        }
    }

    // y(i) = x(i)^2.
    template<typename S, typename VecType, template<typename> class Container>
    void math21_operator_container_square(const VecType &x, Container<S> &y) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = (S) (xjsquare(x(i)));
        }
    }

    // In-place square.
    template<typename S, template<typename> class Container>
    void math21_operator_container_square_to(Container<S> &x) {
        math21_operator_container_square(x, x);
    }

    // B(i) = cos(A(i)).
    template<typename VecType>
    void math21_operator_container_cos(const VecType &A, VecType &B) {
        MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match");
        NumN i;
        NumN n = A.size();
        //#pragma omp parallel for
        for (i = 1; i <= n; ++i) {
            B.at(i) = xjcos(A(i));
        }
    }

    // In-place cos.
    template<typename VecType>
    void math21_operator_container_cosTo(VecType &A) {
        NumN i;
        NumN n = A.size();
        //#pragma omp parallel for
        for (i = 1; i <= n; ++i) {
            A.at(i) = xjcos(A(i));
        }
    }

    // B(i) = sin(A(i)).
    template<typename VecType>
    void math21_operator_container_sin(const VecType &A, VecType &B) {
        MATH21_ASSERT(A.size() == B.size(), "vector size doesn't match");
        NumN i;
        NumN n = A.size();
        //#pragma omp parallel for
        for (i = 1; i <= n; ++i) {
            B.at(i) = xjsin(A(i));
        }
    }

    // y(i) = log(x(i)); all elements must be > 0 (checked).
    template<typename VecType1, typename VecType2>
    void math21_operator_container_log_can_onto(const VecType1 &x, VecType2 &y) {
        MATH21_ASSERT_CHECK_VALUE(math21_operator_container_is_larger_number(x, 0), "" << x.log("x"))
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = xjlog(x(i));
        }
        MATH21_ASSERT_FINITE(math21_operator_container_isfinite(y))
    }

    //must clip, because exp(800) is infinite in computer.
    template<typename VecType1, typename VecType2>
    void math21_operator_container_exp(const VecType1 &x, VecType2 &y) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = xjexp(x(i));
        }
        math21_operator_container_clip_zero_and_pos_inf(y);
    }

    // y(i) = x(i) rounded via xjround with parameter m.
    template<typename VecType1, typename VecType2>
    void math21_operator_container_round(const VecType1 &x, VecType2 &y, NumN m) {
        NumN n = x.size();
        MATH21_ASSERT(y.size() == n);
        //#pragma omp parallel for
        for (NumN i = 1; i <= n; ++i) {
            y(i) = xjround(x(i), m);
        }
    }

    // In-place round.
    template<typename VecType>
    void math21_operator_container_round_to(VecType &x, NumN m) {
        math21_operator_container_round(x, x, m);
    }

    // Returns 1 iff x is exactly the run k, k+1, k+2, ...
    template<typename VecType>
    NumB math21_operator_container_isContinuousIntegers(const VecType &x, NumZ k = 1) {
        NumN n = x.size();
        MATH21_ASSERT(n >= 1);
        for (NumN i = 1; i <= n; ++i) {
            if (x(i) != k) {
                return 0;
            }
            ++k;
        }
        return 1;
    }

    // replace A by r where A(i) = x.
    template<typename T, typename VecType>
    void math21_operator_container_replace_by_number(VecType &A, const T &r, const T &x) {
        for (NumN i = 1; i <= A.size(); ++i) {
            if (A(i) == x)A(i) = r;
        }
    }
}
Analyzer.h
#ifndef ANALYZER_H #define ANALYZER_H /************************************************************* * Copyright: (C) 2012 by Markus Schordan * * Author : Markus Schordan * * License : see file LICENSE in the CodeThorn distribution * *************************************************************/ #include <iostream> #include <fstream> #include <set> #include <string> #include <sstream> #include <list> #include <vector> #include <omp.h> #include <boost/unordered_set.hpp> #include <boost/unordered_map.hpp> #include "Timer.h" #include "AstTerm.h" #include "Labeler.h" #include "CFAnalysis.h" #include "RoseAst.h" #include "SgNodeHelper.h" #include "ExprAnalyzer.h" #include "EState.h" #include "TransitionGraph.h" #include "PropertyValueTable.h" #include "CTIOLabeler.h" #include "VariableValueMonitor.h" // we use INT_MIN, INT_MAX #include "limits.h" #include "AstNodeInfo.h" #include "SgTypeSizeMapping.h" namespace CodeThorn { /*! * \author Markus Schordan * \date 2012. */ typedef std::list<const EState*> EStateWorkList; typedef std::pair<int, const EState*> FailedAssertion; typedef std::pair<PState, std::list<int> > PStatePlusIOHistory; enum AnalyzerMode { AM_ALL_STATES, AM_LTL_STATES }; class SpotConnection; /*! * \author Markus Schordan * \date 2012. 
*/ class Analyzer { friend class Visualizer; friend class VariableValueMonitor; public: Analyzer(); ~Analyzer(); protected: static Sawyer::Message::Facility logger; public: static void initDiagnostics(); static std::string nodeToString(SgNode* node); void initAstNodeInfo(SgNode* node); bool isActiveGlobalTopify(); void initializeSolver1(std::string functionToStartAt,SgNode* root, bool oneFunctionOnly); void initializeTraceSolver(std::string functionToStartAt,SgNode* root); void continueAnalysisFrom(EState* newStartEState); // set the size of an element determined by this type void setElementSize(VariableId variableId, SgType* elementType); EState analyzeVariableDeclaration(SgVariableDeclaration* nextNodeToAnalyze1,EState currentEState, Label targetLabel); PState analyzeAssignRhsExpr(PState currentPState,VariableId lhsVar, SgNode* rhs,ConstraintSet& cset); // to be replaced by above function PState analyzeAssignRhs(PState currentPState,VariableId lhsVar, SgNode* rhs,ConstraintSet& cset); void addToWorkList(const EState* estate); const EState* addToWorkListIfNew(EState estate); const EState* takeFromWorkList(); bool isInWorkList(const EState* estate); bool isEmptyWorkList(); const EState* topWorkList(); const EState* popWorkList(); void swapWorkLists(); size_t memorySizeContentEStateWorkLists(); void setOptionStatusMessages(bool flag); bool getOptionStatusMessages(); // thread save; only prints if option status messages is enabled. 
void printStatusMessage(string s); void printStatusMessageLine(string s); void printStatusMessage(string s, bool newLineFlag); std::string analyzerStateToString(); static string lineColSource(SgNode* node); void recordTransition(const EState* sourceEState, Edge e, const EState* targetEState); void printStatusMessage(bool); bool isStartLabel(Label label); // determines whether lab is a function call label of a function // call of the form 'x=f(...)' and returns the varible-id of the // lhs, if a valid pointer is provided bool isFunctionCallWithAssignment(Label lab,VariableId* varId=0); std::list<EState> transferEdgeEState(Edge edge, const EState* estate); std::list<EState> transferFunctionCall(Edge edge, const EState* estate); std::list<EState> transferFunctionCallLocalEdge(Edge edge, const EState* estate); std::list<EState> transferFunctionCallExternal(Edge edge, const EState* estate); std::list<EState> transferFunctionCallReturn(Edge edge, const EState* estate); std::list<EState> transferFunctionExit(Edge edge, const EState* estate); std::list<EState> transferReturnStmt(Edge edge, const EState* estate); std::list<EState> transferVariableDeclaration(SgVariableDeclaration* decl,Edge edge, const EState* estate); std::list<EState> transferExprStmt(SgNode* nextNodeToAnalyze1, Edge edge, const EState* estate); std::list<EState> transferIdentity(Edge edge, const EState* estate); std::list<EState> transferAssignOp(SgAssignOp* assignOp, Edge edge, const EState* estate); std::list<EState> transferIncDecOp(SgNode* nextNodeToAnalyze2, Edge edge, const EState* estate); std::list<EState> transferTrueFalseEdge(SgNode* nextNodeToAnalyze2, Edge edge, const EState* estate); SgNode* getCond(SgNode* node); void generateAstNodeInfo(SgNode* node); bool checkEStateSet(); bool isConsistentEStatePtrSet(std::set<const EState*> estatePtrSet); bool checkTransitionGraph(); //! 
requires init void runSolver4(); void runSolver5(); void runSolver8(); void runSolver10(); void runSolver11(); void runSolver12(); void runSolver(); //! The analyzer requires a CFAnalysis to obtain the ICFG. void setCFAnalyzer(CFAnalysis* cf) { cfanalyzer=cf; } CFAnalysis* getCFAnalyzer() const { return cfanalyzer; } //void initializeVariableIdMapping(SgProject* project) { variableIdMapping.computeVariableSymbolMapping(project); } // access functions for computed information VariableIdMapping* getVariableIdMapping() { return &variableIdMapping; } CTIOLabeler* getLabeler() const { CTIOLabeler* ioLabeler=dynamic_cast<CTIOLabeler*>(cfanalyzer->getLabeler()); ROSE_ASSERT(ioLabeler); return ioLabeler; } Flow* getFlow() { return &flow; } PStateSet* getPStateSet() { return &pstateSet; } EStateSet* getEStateSet() { return &estateSet; } TransitionGraph* getTransitionGraph() { return &transitionGraph; } ConstraintSetMaintainer* getConstraintSetMaintainer() { return &constraintSetMaintainer; } //private: TODO Flow flow; SgNode* startFunRoot; CFAnalysis* cfanalyzer; enum ExplorationMode { EXPL_DEPTH_FIRST, EXPL_BREADTH_FIRST, EXPL_LOOP_AWARE, EXPL_LOOP_AWARE_SYNC, EXPL_RANDOM_MODE1 }; void eventGlobalTopifyTurnedOn(); bool isIncompleteSTGReady(); bool isPrecise(); PropertyValueTable reachabilityResults; int reachabilityAssertCode(const EState* currentEStatePtr); void setExplorationMode(ExplorationMode em) { _explorationMode=em; } ExplorationMode getExplorationMode() { return _explorationMode; } void setSkipSelectedFunctionCalls(bool defer) { _skipSelectedFunctionCalls=true; exprAnalyzer.setSkipSelectedFunctionCalls(true); } void setSkipArrayAccesses(bool skip) { exprAnalyzer.setSkipArrayAccesses(skip); } bool getSkipArrayAccesses() { return exprAnalyzer.getSkipArrayAccesses(); } ExprAnalyzer* getExprAnalyzer(); std::list<FailedAssertion> getFirstAssertionOccurences(){return _firstAssertionOccurences;} void incIterations() { if(isPrecise()) { #pragma omp atomic _iterations+=1; 
} else { #pragma omp atomic _approximated_iterations+=1; } } bool isLoopCondLabel(Label lab); int getApproximatedIterations() { return _approximated_iterations; } int getIterations() { return _iterations; } string getVarNameByIdCode(int varIdCode) {return variableIdMapping.variableName(variableIdMapping.variableIdFromCode(varIdCode));}; void mapGlobalVarInsert(std::string name, int* addr); //! compute the VariableIds of variable declarations VariableIdMapping::VariableIdSet determineVariableIdsOfVariableDeclarations(set<SgVariableDeclaration*> decls); //! compute the VariableIds of SgInitializedNamePtrList VariableIdMapping::VariableIdSet determineVariableIdsOfSgInitializedNames(SgInitializedNamePtrList& namePtrList); std::set<std::string> variableIdsToVariableNames(CodeThorn::AbstractValueSet); std::set<std::string> variableIdsToVariableNames(SPRAY::VariableIdSet); typedef std::list<SgVariableDeclaration*> VariableDeclarationList; VariableDeclarationList computeUnusedGlobalVariableDeclarationList(SgProject* root); VariableDeclarationList computeUsedGlobalVariableDeclarationList(SgProject* root); bool isFailedAssertEState(const EState* estate); bool isVerificationErrorEState(const EState* estate); //! adds a specific code to the io-info of an estate which is checked by isFailedAsserEState and determines a failed-assert estate. Note that the actual assert (and its label) is associated with the previous estate (this information can therefore be obtained from a transition-edge in the transition graph). EState createFailedAssertEState(const EState estate, Label target); EState createVerificationErrorEState(const EState estate, Label target); //! list of all asserts in a program std::list<SgNode*> listOfAssertNodes(SgProject *root); //! 
rers-specific error_x: assert(0) version std::list<std::pair<SgLabelStatement*,SgNode*> > listOfLabeledAssertNodes(SgProject *root); void initLabeledAssertNodes(SgProject* root) { _assertNodes=listOfLabeledAssertNodes(root); } size_t getNumberOfErrorLabels(); std::string labelNameOfAssertLabel(Label lab) { std::string labelName; for(std::list<std::pair<SgLabelStatement*,SgNode*> >::iterator i=_assertNodes.begin();i!=_assertNodes.end();++i) if(lab==getLabeler()->getLabel((*i).second)) labelName=SgNodeHelper::getLabelName((*i).first); //assert(labelName.size()>0); return labelName; } bool isCppLabeledAssertLabel(Label lab) { return labelNameOfAssertLabel(lab).size()>0; } InputOutput::OpType ioOp(const EState* estate) const; void setDisplayDiff(int diff) { _displayDiff=diff; } void setResourceLimitDiff(int diff) { _resourceLimitDiff=diff; } void setSolver(int solver); int getSolver(); void setSemanticFoldThreshold(int t) { _semanticFoldThreshold=t; } void setNumberOfThreadsToUse(int n) { _numberOfThreadsToUse=n; } int getNumberOfThreadsToUse() { return _numberOfThreadsToUse; } std::list<std::pair<SgLabelStatement*,SgNode*> > _assertNodes; VariableId globalVarIdByName(std::string varName) { return globalVarName2VarIdMapping[varName]; } void setTreatStdErrLikeFailedAssert(bool x) { _treatStdErrLikeFailedAssert=x; } boost::unordered_map <std::string,int*> mapGlobalVarAddress; boost::unordered_map <int*,std::string> mapAddressGlobalVar; void setCompoundIncVarsSet(set<AbstractValue> ciVars); void setSmallActivityVarsSet(set<AbstractValue> ciVars); void setAssertCondVarsSet(set<AbstractValue> acVars); enum GlobalTopifyMode {GTM_IO, GTM_IOCF, GTM_IOCFPTR, GTM_COMPOUNDASSIGN, GTM_FLAGS}; void setGlobalTopifyMode(GlobalTopifyMode mode); void setExternalErrorFunctionName(std::string externalErrorFunctionName); // enables external function semantics void enableSVCompFunctionSemantics(); void disableSVCompFunctionSemantics(); bool svCompFunctionSemantics() { return 
_svCompFunctionSemantics; } bool stdFunctionSemantics() { return _stdFunctionSemantics; } void setModeLTLDriven(bool ltlDriven) { transitionGraph.setModeLTLDriven(ltlDriven); } bool getModeLTLDriven() { return transitionGraph.getModeLTLDriven(); } // only used in LTL-driven mode void setSpotConnection(SpotConnection* connection) { _spotConnection = connection; } long analysisRunTimeInSeconds(); void set_finished(std::vector<bool>& v, bool val); bool all_false(std::vector<bool>& v); void setTypeSizeMapping(SgTypeSizeMapping* typeSizeMapping); SgTypeSizeMapping* getTypeSizeMapping(); private: GlobalTopifyMode _globalTopifyMode; set<AbstractValue> _compoundIncVarsSet; set<AbstractValue> _smallActivityVarsSet; set<AbstractValue> _assertCondVarsSet; set<int> _inputVarValues; std::list<int> _inputSequence; std::list<int>::iterator _inputSequenceIterator; ExprAnalyzer exprAnalyzer; VariableIdMapping variableIdMapping; EStateWorkList* estateWorkListCurrent; EStateWorkList* estateWorkListNext; EStateWorkList estateWorkListOne; EStateWorkList estateWorkListTwo; EStateSet estateSet; PStateSet pstateSet; ConstraintSetMaintainer constraintSetMaintainer; TransitionGraph transitionGraph; TransitionGraph backupTransitionGraph; set<const EState*> transitionSourceEStateSetOfLabel(Label lab); int _displayDiff; int _resourceLimitDiff; int _numberOfThreadsToUse; int _semanticFoldThreshold; VariableIdMapping::VariableIdSet _variablesToIgnore; int _solver; AnalyzerMode _analyzerMode; set<const EState*> _newNodesToFold; long int _maxTransitions; long int _maxIterations; long int _maxBytes; long int _maxSeconds; long int _maxTransitionsForcedTop; long int _maxIterationsForcedTop; long int _maxBytesForcedTop; long int _maxSecondsForcedTop; // only used in LTL-driven mode size_t _prevStateSetSizeDisplay = 0; size_t _prevStateSetSizeResource = 0; PState _startPState; bool _optionStatusMessages; std::list<EState> elistify(); std::list<EState> elistify(EState res); // only used in LTL-driven 
mode void setStartEState(const EState* estate); /*! if state exists in stateSet, a pointer to the existing state is returned otherwise a new state is entered into stateSet and a pointer to it is returned. */ const PState* processNew(PState& s); const PState* processNewOrExisting(PState& s); const EState* processNew(EState& s); const EState* processNewOrExisting(EState& s); const EState* processCompleteNewOrExisting(const EState* es); void topifyVariable(PState& pstate, ConstraintSet& cset, AbstractValue varId); bool isTopified(EState& s); EStateSet::ProcessingResult process(EState& s); EStateSet::ProcessingResult process(Label label, PState pstate, ConstraintSet cset, InputOutput io); const ConstraintSet* processNewOrExisting(ConstraintSet& cset); EState createEState(Label label, PState pstate, ConstraintSet cset); EState createEState(Label label, PState pstate, ConstraintSet cset, InputOutput io); VariableValueMonitor variableValueMonitor; bool _treatStdErrLikeFailedAssert; bool _skipSelectedFunctionCalls; ExplorationMode _explorationMode; bool _topifyModeActive; bool _explicitArrays; // loop-aware mode int _swapWorkListsCount; // currently only used for debugging purposes int _iterations; int _approximated_iterations; int _curr_iteration_cnt; int _next_iteration_cnt; bool _stdFunctionSemantics=true; bool _svCompFunctionSemantics; string _externalErrorFunctionName; // the call of this function causes termination of analysis string _externalNonDetIntFunctionName; string _externalNonDetLongFunctionName; string _externalExitFunctionName; Timer _analysisTimer; bool _timerRunning = false; // ======================================================================= // ========================== LTLAnalyzer ================================ // ======================================================================= public: bool isTerminationRelevantLabel(Label label); bool isLTLRelevantEState(const EState* estate); bool isLTLRelevantLabel(Label label); bool 
isStdIOLabel(Label label); std::set<const EState*> nonLTLRelevantEStates(); // reduces all states different to stdin and stdout. void stdIOFoldingOfTransitionGraph(); void semanticFoldingOfTransitionGraph(); // bypasses and removes all states that are not standard I/O states // (old version, works correctly, but has a long execution time) void removeNonIOStates(); // bypasses and removes all states that are not stdIn/stdOut/stdErr/failedAssert states void reduceToObservableBehavior(); // erases transitions that lead directly from one output state to another output state void removeOutputOutputTransitions(); // erases transitions that lead directly from one input state to another input state void removeInputInputTransitions(); // cuts off all paths in the transition graph that lead to leaves // (recursively until only paths of infinite length remain) void pruneLeavesRec(); // connects start, input, output and worklist states according to possible paths in the transition graph. // removes all states and transitions that are not necessary for the graph that only consists of these new transitions. The two parameters allow to select input and/or output states to remain in the STG. void reduceGraphInOutWorklistOnly(bool includeIn=true, bool includeOut=true, bool includeErr=false); // extracts input sequences leading to each discovered failing assertion where discovered for the first time. // stores results in PropertyValueTable "reachabilityResults". // returns length of the longest of these sequences if it can be guaranteed that all processed traces are the // shortest ones leading to the individual failing assertion (returns -1 otherwise). int extractAssertionTraces(); // LTLAnalyzer private: //returns a list of transitions representing existing paths from "startState" to all possible input/output/error states (no output -> output) // collection of transitions to worklist states currently disabled. the returned set has to be deleted by the calling function. 
boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist( const EState* startState, bool includeIn, bool includeOut, bool includeErr); boost::unordered_set<Transition*>* transitionsToInOutErrAndWorklist( const EState* currentState, const EState* startState, boost::unordered_set<Transition*>* results, boost::unordered_set<const EState*>* visited, bool includeIn, bool includeOut, bool includeErr); // adds a string representation of the shortest input path from start state to assertEState to reachabilityResults. returns the length of the // counterexample input sequence. int addCounterexample(int assertCode, const EState* assertEState); // returns a list of EStates from source to target. Target has to come before source in the STG (reversed trace). std::list<const EState*>reverseInOutSequenceBreadthFirst(const EState* source, const EState* target, bool counterexampleWithOutput = false); // returns a list of EStates from source to target (shortest input path). // please note: target has to be a predecessor of source (reversed trace) std::list<const EState*> reverseInOutSequenceDijkstra(const EState* source, const EState* target, bool counterexampleWithOutput = false); std::list<const EState*> filterStdInOutOnly(std::list<const EState*>& states, bool counterexampleWithOutput = false) const; std::string reversedInOutRunToString(std::list<const EState*>& run); //returns the shortest possible number of input states on the path leading to "target". 
int inputSequenceLength(const EState* target); // begin of solver 10 functions (black-box pattern search) bool containsPatternTwoRepetitions(std::list<int>& sequence, int startIndex, int endIndex); bool computePStateAfterInputs(PState& pState, std::list<int>& inputs, int thread_id, std::list<int>* iOSequence=NULL);; std::list<int> inputsFromPatternTwoRepetitions(std::list<int> pattern2r); string convertToCeString(std::list<int>& ceAsIntegers, int maxInputVal); int pStateDepthFirstSearch(PState* startPState, int maxDepth, int thread_id, std::list<int>* partialTrace, int maxInputVal, int patternLength, int PatternIterations); // end of solver 10 functions (black-box pattern search) //less than comparisions on two states according to (#input transitions * #output transitions) bool indegreeTimesOutdegreeLessThan(const EState* a, const EState* b); public: //stores a backup of the created transitionGraph void storeStgBackup(); //load previous backup of the transitionGraph, storing the current version as a backup instead void swapStgWithBackup(); //solver 8 becomes the active solver used by the analyzer. Deletion of previous data iff "resetAnalyzerData" is set to true. 
void setAnalyzerToSolver8(EState* startEState, bool resetAnalyzerData); // first: list of new states (worklist), second: set of found existing states typedef pair<EStateWorkList,std::set<const EState*> > SubSolverResultType; SubSolverResultType subSolver(const EState* currentEStatePtr); void insertInputVarValue(int i) { _inputVarValues.insert(i); } void addInputSequenceValue(int i) { _inputSequence.push_back(i); } void resetToEmptyInputSequence() { _inputSequence.clear(); } void resetInputSequenceIterator() { _inputSequenceIterator=_inputSequence.begin(); } const EState* getEstateBeforeMissingInput() {return _estateBeforeMissingInput;} const EState* getLatestErrorEState() {return _latestErrorEState;} int numberOfInputVarValues() { return _inputVarValues.size(); } std::set<int> getInputVarValues() { return _inputVarValues; } void setStgTraceFileName(std::string filename) { _stg_trace_filename=filename; std::ofstream fout; fout.open(_stg_trace_filename.c_str()); // create new file/overwrite existing file fout<<"START"<<endl; fout.close(); // close. Will be used with append. 
} private: std::string _stg_trace_filename; public: // only used temporarily for binary-binding prototype std::map<std::string,VariableId> globalVarName2VarIdMapping; std::vector<bool> binaryBindingAssert; void setAnalyzerMode(AnalyzerMode am) { _analyzerMode=am; } void setMaxTransitions(size_t maxTransitions) { _maxTransitions=maxTransitions; } void setMaxIterations(size_t maxIterations) { _maxIterations=maxIterations; } void setMaxTransitionsForcedTop(size_t maxTransitions) { _maxTransitionsForcedTop=maxTransitions; } void setMaxIterationsForcedTop(size_t maxIterations) { _maxIterationsForcedTop=maxIterations; } void setMaxBytes(long int maxBytes) { _maxBytes=maxBytes; } void setMaxBytesForcedTop(long int maxBytesForcedTop) { _maxBytesForcedTop=maxBytesForcedTop; } void setMaxSeconds(long int maxSeconds) { _maxSeconds=maxSeconds; } void setMaxSecondsForcedTop(long int maxSecondsForcedTop) { _maxSecondsForcedTop=maxSecondsForcedTop; } void setStartPState(PState startPState) { _startPState=startPState; } void setPatternSearchMaxDepth(size_t iODepth) { _patternSearchMaxDepth=iODepth; } void setPatternSearchRepetitions(size_t patternReps) { _patternSearchRepetitions=patternReps; } void setPatternSearchMaxSuffixDepth(size_t suffixDepth) { _patternSearchMaxSuffixDepth=suffixDepth; } void setPatternSearchAssertTable(PropertyValueTable* patternSearchAsserts) { _patternSearchAssertTable = patternSearchAsserts; }; void setPatternSearchExploration(ExplorationMode explorationMode) { _patternSearchExplorationMode = explorationMode; }; private: PropertyValueTable* _patternSearchAssertTable; int _patternSearchMaxDepth; int _patternSearchRepetitions; int _patternSearchMaxSuffixDepth; ExplorationMode _patternSearchExplorationMode; std::list<FailedAssertion> _firstAssertionOccurences; const EState* _estateBeforeMissingInput; const EState* _latestOutputEState; const EState* _latestErrorEState; // only used in LTL-driven mode SpotConnection* _spotConnection = nullptr; }; // end of 
class Analyzer } // end of namespace CodeThorn #include "RersSpecialization.h" #endif
GB_binop__lor_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__lor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__lor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lor_int64)
// A*D function (colscale):         GB (_AxD__lor_int64)
// D*A function (rowscale):         GB (_DxB__lor_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lor_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lor_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lor_int64)
// C=scalar+B                       GB (_bind1st__lor_int64)
// C=scalar+B'                      GB (_bind1st_tran__lor_int64)
// C=A+scalar                       GB (_bind2nd__lor_int64)
// C=A'+scalar                      GB (_bind2nd_tran__lor_int64)

// C type:    int64_t
// A type:    int64_t
// A pattern? 0
// B type:    int64_t
// B pattern? 0

// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (logical OR on int64 inputs; result is 0 or 1)
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) || (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_INT64 || GxB_NO_LOR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this return is unreachable (the block above always
    // returns); harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t  y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
loop1.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP worksharing demo: prints one line per loop iteration identifying
 * which thread executes it.  Iterations 0..31 are dealt out to the team in
 * chunks of two (schedule(static,2)), so the output shows the round-robin
 * chunk-to-thread mapping.
 *
 * Cleanup vs. the original: the unused variables `j` and `innerreps`, the
 * now-pointless `private(j)` clause, and the dead commented-out inner loop
 * are removed (they only produced -Wunused warnings); main returns an
 * explicit status.  Output is unchanged.
 */
int main(void)
{
  int i;  /* loop index of the omp for; implicitly private in the construct */

#pragma omp parallel
  {
#pragma omp for schedule(static,2)
    for (i = 0; i < 32; i++) {
      /* Message text kept byte-identical to the original program. */
      printf("thread %d is executing %d \n", omp_get_thread_num(), i);
    }
  }
  return 0;
}
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Definitions */ #define LCMSHDRI #if !defined(MAGICKCORE_HDRI_SUPPORT) #if (MAGICKCORE_QUANTUM_DEPTH == 8) #undef LCMSHDRI #define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel) #define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel) typedef unsigned short LCMSType; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) #undef LCMSHDRI #define LCMSScaleSource(pixel) (pixel) #define LCMSScaleTarget(pixel) (pixel) typedef unsigned short LCMSType; #endif #endif #if defined(LCMSHDRI) #define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel)) #define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel)) typedef double LCMSType; #endif /* Forward 
declarations */
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char*,const StringInfo *);

/*
  Typedef declarations.
*/
struct _ProfileInfo
{
  char
    *name;        /* profile name, e.g. "icc", "iptc" */

  size_t
    length;       /* number of bytes in info */

  unsigned char
    *info;        /* raw profile payload */

  size_t
    signature;    /* structure validity signature */
};

/* Carries the image/exception pair through the lcms error callback. */
typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* replace, not merge: any existing profile map is destroyed first */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* keep the 8BIM meta-profile in sync before dropping the named entry */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* NULL when no profile with that name exists; image retains ownership */
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
% % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. % */ #if defined(MAGICKCORE_LCMS_DELEGATE) static LCMSType **DestroyPixelThreadSet(LCMSType **pixels) { register ssize_t i; if (pixels != (LCMSType **) NULL) return((LCMSType **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (LCMSType *) NULL) pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]); pixels=(LCMSType **) RelinquishMagickMemory(pixels); return(pixels); } static LCMSType **AcquirePixelThreadSet(const size_t columns, const size_t channels) { LCMSType **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (LCMSType **) NULL) return((LCMSType **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels* sizeof(**pixels)); if (pixels[i] == (LCMSType *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; 
i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one LCMS color transform per worker thread so each OpenMP worker
  can call cmsDoTransform() without sharing a handle.  Returns NULL on
  failure after releasing any transforms already created.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags,
  CMSExceptionInfo *cms_exception)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  /* NOTE(review): `image` is not read in this visible body — presumably
     retained for interface symmetry; confirm against the full file. */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* Zero the slot array so a partial failure can be cleaned up safely. */
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* The CMSExceptionInfo pointer doubles as the LCMS context so the
       error handler can recover the image/exception it belongs to. */
    transform[i]=cmsCreateTransformTHR((cmsContext) cms_exception,
      source_profile,source_type,target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif

#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  LCMS error callback: surface an LCMS failure as a Magick ImageWarning
  on the exception recorded in the context, tagged with the image filename
  when one is available.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) context;
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      /* No image in the context: warn with a placeholder identifier. */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ?
message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'",image->filename); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 
0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 
0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 
0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 
0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 
0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 
0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } 
/*
  ProfileImage(): add, apply, or (when datum is NULL) delete the named
  profile(s).  ICC/ICM profiles additionally trigger a colorspace transform
  of the pixels when an ICC profile is already attached (LCMS path below).
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
/* Close any open LCMS profile handles before throwing, to avoid leaks. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s).  `name` may be a comma list or '*'
        (IsOptionMember); the iterator is reset after each deletion because
        removal invalidates it.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /* Re-attaching an identical ICC profile: infer the intended
             source space from EXIF hints and attach sRGB when indicated. */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /*
            Future.
value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R03.") != 0) (void) SetAdobeRGB1998ImageProfile(image,exception); */ icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsHPROFILE source_profile; CMSExceptionInfo cms_exception; /* Transform pixel colors as defined by the color profiles. */ cmsSetLogErrorHandler(CMSExceptionHandler); cms_exception.image=image; cms_exception.exception=exception; (void) cms_exception; source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; ColorspaceType source_colorspace, target_colorspace; cmsColorSpaceSignature signature; cmsHPROFILE target_profile; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags, source_type, target_type; int intent; LCMSType **magick_restrict source_pixels, **magick_restrict target_pixels; #if defined(LCMSHDRI) LCMSType source_scale, target_scale; #endif MagickOffsetType progress; size_t source_channels, target_channels; ssize_t y; target_profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_profile=source_profile; source_profile=cmsOpenProfileFromMemTHR((cmsContext) &cms_exception,GetStringInfoDatum(icc_profile), (cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_profile == (cmsHPROFILE) 
NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } #if defined(LCMSHDRI) source_scale=1.0; #endif source_colorspace=sRGBColorspace; source_channels=3; switch (cmsGetColorSpace(source_profile)) { case cmsSigCmykData: { source_colorspace=CMYKColorspace; source_channels=4; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_CMYK_DBL; source_scale=100.0; #else source_type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_colorspace=GRAYColorspace; source_channels=1; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_GRAY_DBL; #else source_type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_colorspace=LabColorspace; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_Lab_DBL; source_scale=100.0; #else source_type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigLuvData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YUV_16; break; } #endif case cmsSigRgbData: { source_colorspace=sRGBColorspace; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_RGB_DBL; #else source_type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_colorspace=XYZColorspace; #if defined(LCMSHDRI) source_type=(cmsUInt32Number) TYPE_XYZ_DBL; #else source_type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigYCbCrData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YCbCr_16; break; } #endif default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } (void) source_colorspace; signature=cmsGetPCS(source_profile); if (target_profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_profile); #if defined(LCMSHDRI) target_scale=1.0; #endif target_channels=3; switch (signature) { case cmsSigCmykData: { target_colorspace=CMYKColorspace; target_channels=4; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_CMYK_DBL; 
target_scale=0.01; #else target_type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_colorspace=GRAYColorspace; target_channels=1; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_GRAY_DBL; #else target_type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_colorspace=LabColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_Lab_DBL; target_scale=0.01; #else target_type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigLuvData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YUV_16; break; } #endif case cmsSigRgbData: { target_colorspace=sRGBColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_RGB_DBL; #else target_type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { target_colorspace=XYZColorspace; #if defined(LCMSHDRI) target_type=(cmsUInt32Number) TYPE_XYZ_DBL; #else target_type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } #if !defined(LCMSHDRI) case cmsSigYCbCrData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YCbCr_16; break; } #endif default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break; case PerceptualIntent: intent=INTENT_PERCEPTUAL; break; case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break; case SaturationIntent: intent=INTENT_SATURATION; break; default: intent=INTENT_PERCEPTUAL; break; } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(image,source_profile, source_type,target_profile,target_type,intent,flags, &cms_exception); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as 
dictated by the source & target image profiles. */ source_pixels=AcquirePixelThreadSet(image->columns,source_channels); target_pixels=AcquirePixelThreadSet(image->columns,target_channels); if ((source_pixels == (LCMSType **) NULL) || (target_pixels == (LCMSType **) NULL)) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (source_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); return(MagickFalse); } if (target_colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register LCMSType *p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } p=source_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=LCMSScaleSource(GetPixelRed(image,q)); if (source_channels > 1) { *p++=LCMSScaleSource(GetPixelGreen(image,q)); *p++=LCMSScaleSource(GetPixelBlue(image,q)); } if (source_channels > 3) *p++=LCMSScaleSource(GetPixelBlack(image,q)); q+=GetPixelChannels(image); } 
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id], (unsigned int) image->columns); p=target_pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_channels == 1) SetPixelGray(image,LCMSScaleTarget(*p),q); else SetPixelRed(image,LCMSScaleTarget(*p),q); p++; if (target_channels > 1) { SetPixelGreen(image,LCMSScaleTarget(*p),q); p++; SetPixelBlue(image,LCMSScaleTarget(*p),q); p++; } if (target_channels > 3) { SetPixelBlack(image,LCMSScaleTarget(*p),q); p++; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? 
GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); } (void) cmsCloseProfile(source_profile); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. 
Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Nothing to reset when the image carries no profile map at all.
  */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor: frees a StringInfo profile stored in
   image->profiles. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a Photoshop resource block; returns the advanced
   pointer.  No bounds checking here — callers must validate lengths. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a big-endian 32-bit value from a resource block; returns the advanced
   pointer. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a big-endian 16-bit value from a resource block; returns the advanced
   pointer. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a 32-bit value big-endian at p (exactly 4 bytes). */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Mirror a named profile (icc, iptc, or xmp) into the image's embedded 8BIM
  (Photoshop resource) profile: the matching 8BIM resource is replaced with
  `profile`, or removed when `profile` is NULL.  Profiles with any other name
  are ignored.  Rebuilds the 8BIM blob and stores it back under the "8bim"
  key in image->profiles.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name to its Photoshop resource id; others are not
     mirrored. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk the 8BIM resource records: "8BIM", 16-bit id, Pascal-style name
     (padded to even length), then a 32-bit payload length. */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* name field is padded to an even byte count */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; register const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. 
*/ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static MagickBooleanType ValidateXMPProfile(const StringInfo *profile) { #if defined(MAGICKCORE_XML_DELEGATE) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) return(MagickFalse); xmlFreeDoc(document); return(MagickTrue); } #else return(MagickTrue); #endif } static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent], property[MagickPathExtent]; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(profile) == MagickFalse)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s'",name); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),CloneStringInfo(profile)); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,profile); } /* Inject profile into image properties. 
*/ (void) FormatLocaleString(property,MagickPathExtent,"%s:*",name); (void) GetImageProperty(image,property,exception); return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } 
value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) 
return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54* 65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x* 65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54* 65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y* 65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. 
*/ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. */ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { 
              /* Push the current IFD so iteration resumes after the
                 sub-IFD, then push the sub-IFD itself. */
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              /* Also queue the next chained IFD, whose offset follows the
                 entry table of the current IFD. */
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

/*
  SyncImageProfiles() pushes the image's current resolution, units, and
  orientation back into its embedded 8BIM and EXIF profiles so the metadata
  matches the in-memory image.  Returns MagickFalse if either profile is
  present but could not be parsed/updated.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  /* Profile lookup is case-insensitive, so "8BIM"/"EXIF" match the stored
     lowercase keys. */
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
GB_binop__rminus_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__rminus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__rminus_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_int64)
// A*D function (colscale):         GB (_AxD__rminus_int64)
// D*A function (rowscale):         GB (_DxB__rminus_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_int64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_int64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_int64)
// C=scalar+B                       GB (_bind1st__rminus_int64)
// C=scalar+B'                     GB (_bind1st_tran__rminus_int64)
// C=A+scalar                       GB (_bind2nd__rminus_int64)
// C=A'+scalar                     GB (_bind2nd_tran__rminus_int64)

// C type:   int64_t
// A type:   int64_t
// A pattern?  0
// B type:   int64_t
// B pattern?  0

// BinaryOp: cij = (bij - aij)
// NOTE: rminus is "reverse minus": the operands are applied flipped, z = y-x.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: note the flipped operand order (y - x) for rminus
#define GB_BINOP(z,x,y,i,j) \
    z = (y - x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The loop body comes from the
// included template; this wrapper only binds the rminus/int64 macros above.
void GB (_Cdense_ewise3_accum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// C = A+B (no accumulation), all three matrices dense; body from template.
void GB (_Cdense_ewise3_noaccum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE), so
// the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_accumB__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; kept
    // verbatim because this file is auto-generated (edit Generator/*, not
    // this file).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return
(GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_int64) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ellipticSchwarzSolverHex3D.c
extern "C" void FUNC(preFDM) (const dlong& Nelements, const pfloat* __restrict__ u, pfloat* __restrict__ work1) { #define getIdx(k,j,i,e) ((k)*p_Nq_e*p_Nq_e+(j)*p_Nq_e+(i)+(e)*p_Nq_e*p_Nq_e*p_Nq_e) #define getIdx2(k,j,i,e) ((k-1)*p_Nq*p_Nq+(j-1)*p_Nq+(i-1)+(e)*p_Nq*p_Nq*p_Nq) #define sWork1(k,j,i,e) (work1[(getIdx(k,j,i,e))]) #define uArr(k,j,i,e) (u[(getIdx2(k,j,i,e))]) #ifdef __NEKRS__OMP__ #pragma omp parallel for #endif for (dlong elem = 0; elem < Nelements; elem++) { #pragma unroll for(int k = 0; k < p_Nq_e; ++k){ #pragma unroll for(int j = 0; j < p_Nq_e; ++j){ #pragma unroll for(int i = 0; i < p_Nq_e; ++i){ const bool iBound = i>=1 && i <(p_Nq_e-1); const bool jBound = j>=1 && j <(p_Nq_e-1); const bool kBound = k>=1 && k <(p_Nq_e-1); if(iBound && jBound && kBound){ const dlong elem_offset = elem * p_Nq * p_Nq * p_Nq; const dlong idx = i + j * p_Nq + k * p_Nq * p_Nq + elem_offset; sWork1(k,j,i,elem) = uArr(k,j,i,elem); } else { sWork1(k,j,i,elem) = 0.0; } } } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; sWork1(l1,j,k,elem) = uArr(l2,j,k,elem); } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; sWork1(p_Nq_e - l1 - 1,j,k,elem) = uArr(p_Nq_e - l2 - 1,j,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; sWork1(i,l1,k,elem) = uArr(i,l2,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; sWork1(i,p_Nq_e - l1 - 1,k,elem) = uArr(i,p_Nq_e - l2 - 1,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 2; sWork1(i,j,l1,elem) = uArr(i,j,l2,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma 
unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 2; sWork1(i,j,p_Nq_e - l1 - 1,elem) = uArr(i,j,p_Nq_e - l2 - 1,elem); } } } #undef getIdx #undef getIdx2 #undef sWork1 #undef uArr } extern "C" void FUNC(postFDM) (const dlong& Nelements, pfloat* __restrict__ my_work1, pfloat* __restrict__ my_work2, pfloat* __restrict__ Su, const pfloat* __restrict__ wts) { pfloat work1[p_Nq_e][p_Nq_e][p_Nq_e]; pfloat work2[p_Nq_e][p_Nq_e][p_Nq_e]; for (dlong elem = 0; elem < Nelements; ++elem) { #pragma unroll for(int k = 0; k < p_Nq_e; ++k){ #pragma unroll for(int j = 0; j < p_Nq_e; ++j){ #pragma unroll for(int i = 0; i < p_Nq_e; ++i) { const dlong elem_offset = elem * p_Nq_e * p_Nq_e * p_Nq_e; const dlong idx = i + j * p_Nq_e + k * p_Nq_e * p_Nq_e + elem_offset; work1[k][j][i] = my_work2[idx]; work2[k][j][i] = my_work1[idx]; } } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work1[l1][j][k] = work1[l1][j][k] - work2[l2][j][k]; } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work1[p_Nq_e - l1 - 1][j][k] = work1[p_Nq_e - l1 - 1][j][k] - work2[p_Nq_e - l2 - 1][j][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work1[i][l1][k] = work1[i][l1][k] - work2[i][l2][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work1[i][p_Nq_e - l1 - 1][k] = work1[i][p_Nq_e - l1 - 1][k] - work2[i][p_Nq_e - l2 - 1][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 0; work1[i][j][l1] = work1[i][j][l1] - work2[i][j][l2]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ 
const int l1 = 0; const int l2 = 0; work1[i][j][p_Nq_e - l1 - 1] = work1[i][j][p_Nq_e - l1 - 1] - work2[i][j][p_Nq_e - l2 - 1]; } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 2; const int l2 = 0; work1[l1][j][k] = work1[l1][j][k] + work1[l2][j][k]; } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 2; const int l2 = 0; work1[p_Nq_e - l1 - 1][j][k] = work1[p_Nq_e - l1 - 1][j][k] + work1[p_Nq_e - l2 - 1][j][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 2; const int l2 = 0; work1[i][l1][k] = work1[i][l1][k] + work1[i][l2][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 2; const int l2 = 0; work1[i][p_Nq_e - l1 - 1][k] = work1[i][p_Nq_e - l1 - 1][k] + work1[i][p_Nq_e - l2 - 1][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 2; const int l2 = 0; work1[i][j][l1] = work1[i][j][l1] + work1[i][j][l2]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 2; const int l2 = 0; work1[i][j][p_Nq_e - l1 - 1] = work1[i][j][p_Nq_e - l1 - 1] + work1[i][j][p_Nq_e - l2 - 1]; } } #pragma unroll for(int k = 0; k < p_Nq; ++k){ #pragma unroll for(int j = 0; j < p_Nq; ++j){ #pragma unroll for(int i = 0; i < p_Nq; ++i){ const dlong elem_offset = elem * p_Nq * p_Nq * p_Nq; const dlong idx = i + j * p_Nq + k * p_Nq * p_Nq + elem_offset; Su[idx] = work1[k + 1][j + 1][i + 1] * wts[idx]; } } } } } extern "C" void FUNC(fusedFDM) ( const dlong& Nelements, const dlong& localNelements, const dlong* __restrict__ elementList, pfloat* __restrict__ Su, const pfloat* __restrict__ S_x, const pfloat* __restrict__ S_y, const pfloat* __restrict__ S_z, const pfloat* __restrict__ 
inv_L, #if p_restrict const dfloat* __restrict__ wts, #endif pfloat* __restrict__ u ) { #define getIdx(k,j,i,e) ((k)*p_Nq_e*p_Nq_e+(j)*p_Nq_e+(i)+(e)*p_Nq_e*p_Nq_e*p_Nq_e) #define work1(k,j,i,e) (u[(getIdx(k,j,i,e))]) pfloat S_x_e[p_Nq_e][p_Nq_e]; pfloat S_y_e[p_Nq_e][p_Nq_e]; pfloat S_z_e[p_Nq_e][p_Nq_e]; pfloat S_x_eT[p_Nq_e][p_Nq_e]; pfloat S_y_eT[p_Nq_e][p_Nq_e]; pfloat S_z_eT[p_Nq_e][p_Nq_e]; pfloat tmp[p_Nq_e][p_Nq_e][p_Nq_e]; pfloat work2[p_Nq_e][p_Nq_e][p_Nq_e]; for (dlong my_elem = 0; my_elem < Nelements; ++my_elem) { const dlong element = my_elem; const dlong elem = element; #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; work1(l1,j,k,elem) = work1(l1,j,k,elem) - work1(l2,j,k,elem); } } #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; work1(p_Nq_e - l1 - 1,j,k,elem) = work1(p_Nq_e - l1 - 1,j,k,elem) - work1(p_Nq_e - l2 - 1,j,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; work1(i,l1,k,elem) = work1(i,l1,k,elem) - work1(i,l2,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 2; work1(i,p_Nq_e - l1 - 1,k,elem) = work1(i,p_Nq_e - l1 - 1,k,elem) - work1(i,p_Nq_e - l2 - 1,k,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 2; work1(i,j,l1,elem) = work1(i,j,l1,elem) - work1(i,j,l2,elem); } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 2; work1(i,j,p_Nq_e - l1 - 1,elem) = work1(i,j,p_Nq_e - l1 - 1,elem) - work1(i,j,p_Nq_e - l2 - 1,elem); } } #pragma unroll for (int i = 0; i < p_Nq_e; i++){ #pragma unroll for (int j = 
0; j < p_Nq_e; j++) { const int ij = j + i * p_Nq_e; S_x_e[i][j] = S_x[ij + element * p_Nq_e * p_Nq_e]; S_y_e[i][j] = S_y[ij + element * p_Nq_e * p_Nq_e]; S_z_e[i][j] = S_z[ij + element * p_Nq_e * p_Nq_e]; S_x_eT[j][i] = S_x_e[i][j]; S_y_eT[j][i] = S_y_e[i][j]; S_z_eT[j][i] = S_z_e[i][j]; } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; i++) { pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_x_e[l][j] * work1(k,i,l,elem); work2[k][i][j] = value; } } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; i++) { pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_y_e[l][j] * work2[k][l][i]; //work1(j,i,k,elem) = value; tmp[j][k][i] = value; } } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; i++) { const int v = i + j * p_Nq_e + k * p_Nq_e * p_Nq_e; pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_z_e[l][k] * tmp[j][l][i]; work2[k][i][j] = value * inv_L[v + element * p_Nq_e * p_Nq_e * p_Nq_e]; } } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; i++) { pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_x_eT[l][i] * work2[k][l][j]; tmp[k][j][i] = value; } } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; i++) { pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_y_eT[l][j] * tmp[k][l][i]; work2[j][k][i] = value; } } } #pragma unroll for (int k = 0; k < p_Nq_e; k++) { #pragma unroll for (int j = 0; j < p_Nq_e; j++) { #pragma unroll for (int i = 0; i < p_Nq_e; 
i++) { pfloat value = 0.0; #pragma unroll for (int l = 0; l < p_Nq_e; l++) value += S_z_eT[l][k] * work2[j][l][i]; #if (!p_restrict) const dlong elem_offset = element * p_Nq_e * p_Nq_e * p_Nq_e; const int v = i + j * p_Nq_e + k * p_Nq_e * p_Nq_e + elem_offset; Su[v] = value; #endif tmp[k][j][i] = value; } } } #if (!p_restrict) #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 0; work2[l1][j][k] = tmp[l2][j][k]; work2[p_Nq_e - l1 - 1][j][k] = tmp[p_Nq_e - l2 - 1][j][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work2[i][l1][k] = tmp[i][l2][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int k = 1; k < p_Nq_e-1; ++k){ const int l1 = 0; const int l2 = 0; work2[i][p_Nq_e - l1 - 1][k] = tmp[i][p_Nq_e - l2 - 1][k]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 0; work2[i][j][l1] = tmp[i][j][l2]; } } #pragma unroll for(int i = 1; i < p_Nq_e-1; ++i){ #pragma unroll for(int j = 1; j < p_Nq_e-1; ++j){ const int l1 = 0; const int l2 = 0; work2[i][j][p_Nq_e - l1 - 1] = tmp[i][j][p_Nq_e - l2 - 1]; } } #pragma unroll for(int k = 0; k < p_Nq_e; ++k){ #pragma unroll for(int j = 0; j < p_Nq_e; ++j){ #pragma unroll for(int i = 0; i < p_Nq_e; ++i) { const dlong elem_offset = element * p_Nq_e * p_Nq_e * p_Nq_e; const dlong idx = i + j * p_Nq_e + k * p_Nq_e * p_Nq_e + elem_offset; u[idx] = work2[k][j][i]; } } } #else /* if (!p_restrict) */ #pragma unroll for(int k = 0; k < p_Nq; ++k){ #pragma unroll for(int j = 0; j < p_Nq; ++j){ #pragma unroll for(int i = 0; i < p_Nq; ++i){ const dlong elem_offset = element * p_Nq * p_Nq * p_Nq; const dlong idx = i + j * p_Nq + k * p_Nq * p_Nq + elem_offset; Su[idx] = tmp[k + 1][j + 1][i + 1] * wts[idx]; } } } #endif } #undef getIdx #undef work1 }
ofmo-twoint.c
/**
 * @file ofmo-twoint.c
 * @brief Miscellaneous bookkeeping routines (everything except the integral
 *        evaluation itself) used by the two-electron integral (ERI) code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "ofmo-def.h"
//#define Free(p) if ((p)!=NULL) free( (p) )
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks when OpenMP is unavailable. */
static int omp_get_thread_num() { return 0; }
static int omp_get_max_threads() { return 1; }
#endif
/* Per-thread buffers for the buffered direct SCF method. */
static int *LAST_IJCS = NULL;            /* outer CS-pair index when buffer filled */
static int *LAST_KLCS = NULL;            /* inner CS-pair index when buffer filled */
static int *LAST_ERI_TYPE = NULL;        /* ERI type being computed when buffer filled */
static size_t *EBUF_NON_ZERO_ERI = NULL; /* number of ERIs stored per thread */
static double **EBUF_VAL = NULL;         /* buffered ERI values */
static short int **EBUF_IND4 = NULL;     /* 4 AO indices per buffered ERI */
static size_t *EBUF_MAX_NZERI = NULL;    /* buffer capacities */
/* Temporary integral storage used by the direct method. */
#define TMP_INTEG_SIZE 64 // MB
static size_t *ETMP_MAX_NZERI = NULL;
static double **ETMP_VAL = NULL;
static short **ETMP_IND4 = NULL;
static size_t *ETMP_NZERI = NULL;
/* Memory used by the thread-parallel Fock (G) matrix build. */
static int *MAX_NAO = NULL;
static double **G_THREADS = NULL;   /* per-thread partial G matrices */
static double *DENS_SQUARE = NULL;  /* density matrix expanded to square form */
static int BUF_NAO = 0;
static float *DENS_CS = NULL;       /* per-CS-pair density maxima (Schwarz screening) */
static int BUF_NCS = 0;
static int TWOINT_NCS = -1;
static int NTHREADS = 1;
double _twoint_inv2_;
double _twoint_inv3_;
double _twoint_spi2_;

/** Release all module-level memory; registered via atexit() in init. */
static void ofmo_twoint_finalize() {
    int i;
    Free( LAST_IJCS );
    Free( LAST_KLCS );
    Free( LAST_ERI_TYPE );
    Free( EBUF_NON_ZERO_ERI );
    Free( DENS_SQUARE );
    Free( EBUF_MAX_NZERI );
    Free( MAX_NAO );
    if ( EBUF_VAL != NULL ) {
        for ( i=0; i<NTHREADS; i++ ) Free( EBUF_VAL[i] );
        Free( EBUF_VAL );
    }
    if ( EBUF_IND4 != NULL ) {
        for ( i=0; i<NTHREADS; i++ ) Free( EBUF_IND4[i] );
        Free( EBUF_IND4 );
    }
    if ( G_THREADS != NULL ) {
        for ( i=0; i<NTHREADS; i++ ) Free( G_THREADS[i] );
        Free( G_THREADS );
    }
    MAX_NAO = NULL;
    /* NOTE(review): ETMP_MAX_NZERI is deliberately left unfreed here. */
    // Free( ETMP_MAX_NZERI );
    Free( ETMP_NZERI );
    if ( ETMP_VAL != NULL ) {
        for ( i=0; i<NTHREADS; i++ ) Free( ETMP_VAL[i] );
        Free( ETMP_VAL );
    }
    if ( ETMP_IND4 != NULL ) {
        for ( i=0; i<NTHREADS; i++ ) Free( ETMP_IND4[i] );
        Free( ETMP_IND4 );
    }
}

/** Initialize the two-electron integral module.
 *
 * Allocates and initializes the per-thread state needed to run buffered
 * direct SCF under MPI/OpenMP hybrid parallelism.  Idempotent.
 *
 * @attention Must be called from outside any OpenMP parallel region.
 */
int ofmo_twoint_init() {
    static int called = false;
    int nthreads, i;
    if ( called ) return 0;
    nthreads = omp_get_max_threads();
    LAST_IJCS = (int*)malloc( sizeof(int) * nthreads );
    LAST_KLCS = (int*)malloc( sizeof(int) * nthreads );
    LAST_ERI_TYPE = (int*)malloc( sizeof(int) * nthreads );
    EBUF_NON_ZERO_ERI= (size_t*)malloc( sizeof(size_t) * nthreads );
    EBUF_VAL = (double**)malloc( sizeof(double*) * nthreads );
    EBUF_IND4 = (short int**)malloc( sizeof(short int*) * nthreads );
    EBUF_MAX_NZERI = (size_t*)malloc( sizeof(size_t) * nthreads );
    G_THREADS = (double**)malloc( sizeof(double*) * nthreads );
    MAX_NAO = (int*)malloc( sizeof(int) * nthreads );
    // for direct method
    ETMP_MAX_NZERI = (size_t*)malloc( sizeof(size_t) * nthreads );
    ETMP_NZERI = (size_t*)malloc( sizeof(size_t) * nthreads );
    ETMP_VAL = (double**)malloc( sizeof(double*) * nthreads );
    ETMP_IND4 = (short**)malloc( sizeof(short*) * nthreads );
    for ( i=0; i<nthreads; i++ ) {
        ETMP_VAL[i] = NULL;
        ETMP_IND4[i] = NULL;
    }
    /* Each thread allocates its own TMP_INTEG_SIZE MB scratch area. */
#pragma omp parallel
    {
        int mythread;
        size_t max_nzeri;
        mythread = omp_get_thread_num();
        max_nzeri = TMP_INTEG_SIZE*1024*1024/
            ( sizeof(double)*1 + sizeof(short)*4 );
        ETMP_VAL[mythread] = (double*)malloc(sizeof(double)*max_nzeri );
        ETMP_IND4[mythread] = (short*)malloc( sizeof(short)*max_nzeri*4 );
        ETMP_NZERI[mythread] = 0;
        ETMP_MAX_NZERI[mythread] = max_nzeri;
    }
    for ( i=0; i<nthreads; i++ ) LAST_IJCS[i] = LAST_KLCS[i] = -1;
    //for ( i=0; i<nthreads; i++ ) LAST_ERI_TYPE[i] = 1000000;
    for ( i=0; i<nthreads; i++ ) LAST_ERI_TYPE[i] = -1;
    for ( i=0; i<nthreads; i++ )
        EBUF_NON_ZERO_ERI[i] = EBUF_MAX_NZERI[i] = MAX_NAO[i] = 0;
    for ( i=0; i<nthreads; i++ ) {
        EBUF_VAL[i] = NULL;
        EBUF_IND4[i] = NULL;
        G_THREADS[i] = NULL;
    }
    NTHREADS = nthreads;
    // added
    /* pi2 = 2*atan(1) = pi/2; precompute constants used by integral kernels. */
    double pi2;
    pi2 = 2.e0 * atan(1.e0);
    _twoint_inv2_ = 1.e0 / 2.e0;
    _twoint_inv3_ = 1.e0 / 3.e0;
    _twoint_spi2_ = sqrt(pi2);
    atexit( ofmo_twoint_finalize );
    called = true;
    return 0;
}

/** Ensure the calling thread's ERI buffer holds at least the requested size.
 *
 * Grows (never shrinks) this thread's buffered-direct-SCF storage.
 *
 * @attention Call from inside the parallel region (per-thread).
 * @param[in] mythread              thread number
 * @param[in] ebuf_buffer_size_mb   requested buffer size in MB
 * @return the buffer capacity in number of ERIs
 */
size_t ofmo_twoint_set_buffer_size(
        const int mythread, const size_t ebuf_buffer_size_mb ) {
    size_t ebuf_max_nzeri;
    ebuf_max_nzeri = (size_t) (ebuf_buffer_size_mb * 1024 * 1024 /
            (double)( sizeof(double)+4*sizeof(short int) ) );
    if ( ebuf_max_nzeri > EBUF_MAX_NZERI[mythread] ) {
        if ( EBUF_VAL[mythread] != NULL ) Free( EBUF_VAL[mythread] );
        if ( EBUF_IND4[mythread] != NULL ) Free( EBUF_IND4[mythread] );
        EBUF_VAL[mythread] =
            (double*)malloc( sizeof(double)*ebuf_max_nzeri );
        EBUF_IND4[mythread] =
            (short int*)malloc( sizeof(short int)*ebuf_max_nzeri*4 );
        EBUF_MAX_NZERI[mythread] = ebuf_max_nzeri;
    }
    return EBUF_MAX_NZERI[mythread];
}

/** Return this thread's ERI buffer capacity. */
size_t ofmo_twoint_get_max_nzeri( const int mythread ) {
    return EBUF_MAX_NZERI[mythread];
}

/** Ensure a per-thread partial G (Fock two-electron part) matrix of at
 *  least maxnao x maxnao and return it.  Each thread accumulates into its
 *  own copy to avoid lock contention; results are reduced afterwards.
 *
 * @attention Call from inside the parallel region (per-thread).
 */
double* ofmo_twoint_alloc_local_gmat( const int mythread,
        const int maxnao ) {
    int nnao;
    if ( maxnao > MAX_NAO[mythread] ) {
        nnao = maxnao * maxnao;
        if ( G_THREADS[mythread] != NULL ) Free( G_THREADS[mythread] );
        G_THREADS[mythread] = (double*)malloc( sizeof(double) * nnao );
        MAX_NAO[mythread] = maxnao;
    }
    return G_THREADS[mythread];
}

/** Ensure a shared buffer for the density matrix in square (unpacked) form.
 *  A square layout keeps index arithmetic cheap during the G-matrix build.
 *
 * @attention Call from outside any parallel region.
 */
double* ofmo_twoint_alloc_square_density( const int maxnao ) {
    if ( maxnao > BUF_NAO ) {
        if ( DENS_SQUARE != NULL ) Free( DENS_SQUARE );
        DENS_SQUARE = (double*)malloc( sizeof(double) * maxnao * maxnao );
        BUF_NAO = maxnao;
    }
    return DENS_SQUARE;
}

/** Record the outer CS-pair index at the moment the ERI buffer filled. */
void ofmo_twoint_set_last_ijcs( const int mythread, const int last_ijcs ) {
    LAST_IJCS[mythread] = last_ijcs;
}

/** Record the inner CS-pair index at the moment the ERI buffer filled. */
void ofmo_twoint_set_last_klcs( const int mythread, const int last_klcs ) {
    LAST_KLCS[mythread] = last_klcs;
}

/** Record the ERI type being computed when the buffer filled. */
void ofmo_twoint_set_last_eri_type( const int mythread,
        const int last_eri_type ) {
    LAST_ERI_TYPE[mythread] = last_eri_type;
}

/** Record how many contracted ERIs this thread has stored in its buffer. */
void ofmo_twoint_set_stored_nzeri( const int mythread,
        const size_t ebuf_non_zero_eri ) {
    EBUF_NON_ZERO_ERI[mythread] = ebuf_non_zero_eri;
}

/** Return the ERI type being computed when the buffer filled. */
int ofmo_twoint_get_last_eri_type( const int mythread ) {
    return LAST_ERI_TYPE[mythread];
}

/** Return the outer CS-pair index recorded when the buffer filled. */
int ofmo_twoint_get_last_ijcs( const int mythread ) {
    return LAST_IJCS[mythread];
}

/** Return the inner CS-pair index recorded when the buffer filled. */
int ofmo_twoint_get_last_klcs( const int mythread ) {
    return LAST_KLCS[mythread];
}

/** Return this thread's buffered ERI value array. */
double* ofmo_twoint_get_ebuf_eri( const int mythread ) {
    return EBUF_VAL[mythread];
}

/** Return this thread's buffered 4-AO-index array (4 entries per ERI). */
short int* ofmo_twoint_get_ebuf_ind4( const int mythread ) {
    return EBUF_IND4[mythread];
}

/** Return the number of contracted ERIs stored in this thread's buffer. */
size_t ofmo_twoint_get_stored_nzeri( const int mythread ) {
    return EBUF_NON_ZERO_ERI[mythread];
}

/* Accessors for the temporary integral area used by the direct method
 * and IFC4C. */
void ofmo_twoint_set_stored_integ( const int mythread,
        const size_t ninteg ) {
    ETMP_NZERI[mythread] = ninteg;
}

size_t ofmo_twoint_get_max_stored_integ( const int mythread ) {
    return ETMP_MAX_NZERI[mythread];
}

size_t ofmo_twoint_get_stored_integ( const int mythread ) {
    return ETMP_NZERI[mythread];
}

double* ofmo_twoint_getadd_integ_val( const int mythread ) {
    return ETMP_VAL[mythread];
}

short* ofmo_twoint_getadd_integ_ind4( const int mythread ) {
    return ETMP_IND4[mythread];
}

// --------------------------------
// largest last_eri_type among all workers
static int g_LAST_ERI_TYPE = -1;

void ofmo_twoint_set_global_last_eri_type( const int last_eri_type ) {
    g_LAST_ERI_TYPE = last_eri_type;
}

int ofmo_twoint_get_global_last_eri_type( void ) {
    return g_LAST_ERI_TYPE;
}

// --------------------------------
// Dcs: per-CS-pair density maxima used for integral screening
float* ofmo_twoint_get_Dcs( void ) {
    return DENS_CS;
}

void ofmo_twoint_free_Dcs( void ) {
    Free( DENS_CS );
    DENS_CS = NULL;
    BUF_NCS = 0;
    TWOINT_NCS = -1;
}

/** Ensure the Dcs screening matrix holds at least maxncs x maxncs floats. */
float* ofmo_twoint_alloc_Dcs( const int maxncs ) {
    if ( maxncs > BUF_NCS ) {
        ofmo_twoint_free_Dcs();
        DENS_CS = (float*)malloc( sizeof(float) * maxncs * maxncs );
        BUF_NCS = maxncs;
    }
    return DENS_CS;
}

#ifndef MIN2
#define MIN2(a, b) ((a)<(b))? (a): (b)
#endif
#ifndef MAX2
#define MAX2(a, b) ((a)>(b))? (a): (b)
#endif

/** Build the symmetric Dcs matrix: for every CS pair (ics,jcs), the largest
 *  |D| over the AO block of that pair.  Designed to be called from inside
 *  an OpenMP parallel region (uses barrier/single/for).
 *
 * @param[in] maxlqn      highest angular momentum
 * @param[in] nao         number of AOs (unused here)
 * @param[in] leading_cs  first CS index per angular momentum (length maxlqn+2)
 * @param[in] D           packed (triangular) density matrix
 */
float* ofmo_twoint_gen_Dcs( const int maxlqn, const int nao,
        const int leading_cs[], const double D[])
{
    int ncs = leading_cs[maxlqn+1];
    int nL[] = {1, 3, 6, 10}; /* AOs per shell for s,p,d,f */
    float *Dcs = NULL;
#pragma omp barrier
#pragma omp single
    Dcs = ofmo_twoint_alloc_Dcs(ncs);
    Dcs = DENS_CS;
    //#pragma omp parallel
    {
        int iao00 = 0;
        for (int La=0; La<=maxlqn; La++) {
            int inao = nL[La];
            int ics0 = leading_cs[La];
            int ics1 = leading_cs[La+1];
#pragma omp for schedule(guided) nowait
            for (int ics=ics0; ics<ics1; ics++) {
                int iao0=iao00+inao*(ics-ics0);
                int iao1=iao0+inao;
                int jao0 = 0;
                for (int Lb=0; Lb<=La; Lb++) {
                    int jnao = nL[Lb];
                    int jcs0 = leading_cs[Lb];
                    int jcs1 = leading_cs[Lb+1]-1;
                    jcs1 = MIN2(jcs1, ics);
                    for (int jcs=jcs0; jcs<=jcs1; jcs++) {
                        double dmax = 0.0e0;
                        for (int iao=iao0; iao<iao1; iao++) {
                            int jao1=MIN2(jao0+jnao-1, iao);
                            int iao2 = iao*(iao+1)/2; /* triangular row offset */
                            for (int jao=jao0; jao<=jao1; jao++) {
                                dmax = MAX2(dmax, fabs(D[iao2+jao]));
                            }
                        }
                        Dcs[ics*ncs+jcs] = (float)dmax;
                        Dcs[jcs*ncs+ics] = (float)dmax;
                        jao0 += jnao;
                    }
                }
                // iao0 += inao;
            }
            iao00 += inao*(ics1-ics0);
        }
    }
#pragma omp single
    TWOINT_NCS = ncs;
    return Dcs;
}

/** Largest screening density factor over the 6 CS pairs of (ij|kl);
 *  Coulomb-type pairs are weighted by 4 (standard Fock-build factor). */
float ofmo_twoint_dmax6(const int i, const int j, const int k, const int l)
{
    int ncs = TWOINT_NCS;
    float *Dcs = DENS_CS;
    float dmax = 1.0;
    int i2 = i * ncs;
    int j2 = j * ncs;
    int k2 = k * ncs;
    dmax = MAX2(Dcs[i2+j], Dcs[k2+l]) * 4;
    dmax = MAX2(Dcs[i2+k], dmax);
    dmax = MAX2(Dcs[i2+l], dmax);
    dmax = MAX2(Dcs[j2+k], dmax);
    dmax = MAX2(Dcs[j2+l], dmax);
    return dmax;
}

/** Screening density factor for a single CS pair (kl), weighted by 2. */
float ofmo_twoint_dmax2(const int k, const int l)
{
    int ncs = TWOINT_NCS;
    float *Dcs = DENS_CS;
    return 2 * Dcs[k*ncs+l];
}

// --------------------------------
// EPS: screening thresholds (setter returns current value; eps<=0 queries)
static float TWOINT_EPS_PS4 = 1e-20F;
static float TWOINT_EPS_ERI = 1e-15F;
static float TWOINT_EPS_SCH = 1e-12F;

float ofmo_twoint_eps_eri(float eps) {
    if (eps>0) TWOINT_EPS_ERI = eps;
    return TWOINT_EPS_ERI;
}

float ofmo_twoint_eps_ps4(float eps) {
    if (eps>0) TWOINT_EPS_PS4 = eps;
    return TWOINT_EPS_PS4;
}

float ofmo_twoint_eps_sch(float eps) {
    if (eps>0) TWOINT_EPS_SCH = eps;
    return TWOINT_EPS_SCH;
}
// --------------------------------
pr1.c
/*
 * Write an OpenMP program which performs C=A+B & D=A-B in separate
 * blocks/sections where A, B, C & D are arrays.
 */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define N 50

int main (int argc, char *argv[])
{
  int i, nthreads, tid;
  float a[N], b[N], c[N], d[N];

  /* Initialize inputs; result arrays start at zero. */
  for (i = 0; i < N; i++) {
    a[i] = i * 1.5;
    b[i] = i + 22.35;
    c[i] = d[i] = 0.0;
  }

  #pragma omp parallel shared(a,b,c,d,nthreads) private(i,tid)
  {
    tid = omp_get_thread_num();
    if (tid == 0) {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
    printf("Thread %d starting...\n", tid);

    /* Each section is executed once, by whichever thread picks it up. */
    #pragma omp sections nowait
    {
      #pragma omp section
      {
        /* Section 1: C = A + B */
        printf("Thread %d doing section 1\n", tid);
        for (i = 0; i < N; i++) {
          c[i] = a[i] + b[i];
          printf("Thread %d: c[%d]= %f\n", tid, i, c[i]);
        }
      }

      #pragma omp section
      {
        /* Section 2: D = A - B.
         * BUG FIX: previously computed a[i] * b[i], contradicting the
         * stated requirement D = A - B. */
        printf("Thread %d doing section 2\n", tid);
        for (i = 0; i < N; i++) {
          d[i] = a[i] - b[i];
          printf("Thread %d: d[%d]= %f\n", tid, i, d[i]);
        }
      }
    } /* end of sections */

    printf("Thread %d done.\n", tid);
  } /* end of parallel region */

  return 0;
}
lloyds_slow_par.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <stdbool.h> #include <omp.h> #include "csvparser.h" void vector_init(double *a, int length) { for (int i = 0; i < length; i++) { a[i] = 0; } } void vector_copy(double *dst, double *src, int length) { for (int i = 0; i < length; i++) { dst[i] = src[i]; } } void vector_subtract(double *dst, double *a, double *b, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] - b[i]; } } void vector_add(double *dst, double *a, double *b, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] + b[i]; } } void vector_elementwise_avg(double *dst, double *a, int denominator, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] / denominator; } } double vector_L2_norm(double *a, int length) { double vec_length = 0; for (int i = 0; i < length; i++) { vec_length += a[i] * a[i]; } return sqrt(vec_length); } double vector_sum(double *a, int length) { double sum = 0; for (int i = 0; i < length; i ++) { sum += a[i]; } return sum; } void vector_square(double *dst, double *a, int length) { for (int i = 0; i < length; i++) { dst[i] = a[i] * a[i]; } } // Program should take K, a data set (.csv), a delimiter, // a binary flag data_contains_header, and a binary flag to drop labels int main(int argc, char *argv[]){ srand(111); CsvParser *reader; CsvRow *row; if(argc != 6){ printf("Incorrect number of args. 
Should be 5, received %d\n", argc - 1); exit(1); } int K = atoi(argv[1]); char *data_fp = argv[2]; char *delimiter = argv[3]; int has_header_row = atoi(argv[4]); int drop_labels = atoi(argv[5]); // Take in data set reader = CsvParser_new(data_fp, delimiter, has_header_row); // Get number of columns row = CsvParser_getRow(reader); int num_cols = CsvParser_getNumFields(row); CsvParser_destroy_row(row); if (drop_labels){ num_cols--; } // Get number of rows like lazy people int num_rows = 1; while ((row = CsvParser_getRow(reader))){ num_rows++; CsvParser_destroy_row(row); } // Torch the CsvParser and start again so we can read data in. CsvParser_destroy(reader); reader = CsvParser_new(data_fp, delimiter, has_header_row); double **data_matrix = malloc(num_rows * sizeof(double *)); for (int i = 0; i < num_rows; i++) { data_matrix[i] = malloc(num_cols * sizeof(double)); } int row_index = 0; while ((row = CsvParser_getRow(reader))){ const char **row_fields = CsvParser_getFields(row); for (int col_index = 0; col_index < num_cols; col_index++) { data_matrix[row_index][col_index] = atof(row_fields[col_index]); } CsvParser_destroy_row(row); row_index++; } CsvParser_destroy(reader); // Initialize some cluster centers from random rows in our data // Given the fact that we will usually have way more rows than centers, we can // probably just roll a number and reroll if we already rolled it. 
Collisions // should be relatively infrequent bool collided; double centers[K][num_cols]; for (int i = 0; i < K; i++) { int center_indices[K]; collided = true; while (collided) { center_indices[i] = rand() % num_rows; collided = false; for (int j = 0; j < i; j++) { if (center_indices[j] == center_indices[i]) { collided = true; break; } } vector_copy(centers[i], data_matrix[center_indices[i]], num_cols); } } printf("Initial cluster centers:\n"); for (int i = 0; i < K; i++) { for (int j = 0; j < num_cols; j++) { printf("%f ", centers[i][j]); } printf("\n"); } int num_iterations = 0; int *clusterings = calloc(num_rows, sizeof(int)); double *cluster_avg = malloc(num_rows * sizeof(double)); bool changes; double tstart = omp_get_wtime(); while (1) { changes = false; omp_set_num_threads(32); // Assign points to cluster centers int center, observation, arg_min; double min_norm, local_norm, diff[num_cols]; #pragma omp parallel for \ private(center, observation, min_norm, local_norm, diff, arg_min) \ shared(num_rows, K, data_matrix, centers) for (observation = 0; observation < num_rows; observation++) { arg_min = 0; min_norm = -1; for (center = 0; center < K; center++) { vector_subtract(diff, data_matrix[observation], centers[center], num_cols); local_norm = vector_L2_norm(diff, num_cols); if ((min_norm == -1) || (local_norm < min_norm)) { arg_min = center; min_norm = local_norm; } } if (clusterings[observation] != arg_min) { changes = true; } clusterings[observation] = arg_min; } // break out of loop if total within-cluster sum of squares has converged if (!changes) { break; } num_iterations++; // Find cluster means and reassign centers int cluster_index, element, elements_in_cluster; #pragma omp parallel for \ private(cluster_index, element, elements_in_cluster) \ shared(num_rows, clusterings, data_matrix, K) for (cluster_index = 0; cluster_index < K; cluster_index++) { elements_in_cluster = 0; vector_init(cluster_avg, num_rows); for (element = 0; element < num_rows; 
element++) { if (clusterings[element] == cluster_index) { vector_add(cluster_avg, cluster_avg, data_matrix[element], num_cols); elements_in_cluster++; } } vector_elementwise_avg(cluster_avg, cluster_avg, elements_in_cluster, num_cols); vector_copy(centers[cluster_index], cluster_avg, num_cols); } } double tend = omp_get_wtime() - tstart; printf("\nFinal cluster centers:\n"); for (int i = 0; i < K; i++) { for (int j = 0; j < num_cols; j++) { printf("%f ", centers[i][j]); } printf("\n"); } printf("\nNum iterations: %d\n", num_iterations); printf("Time taken %f seconds\n", tend); for (int i = 0; i < num_rows; i++) { free(data_matrix[i]); } free(data_matrix); free(clusterings); free(cluster_avg); exit(0); }
TekNote.c
/* * Nama Program : TekNote - Proyek Akhir Semester Kelompok 7 * Tanggal : 24 Juni 2021 * Nomor Grup : 7 * Nama Anggota : * Muhammad Irsyad Fakhruddin - 2006468850 (tidak berkontribusi) * Muhammad Roland Maulana - 2006520784 * Yehezkiel Jonatan - 2006520235 */ #include <stdio.h> #include <stdlib.h> #include <conio.h> #include <windows.h> #include <string.h> #include <ctype.h> #include <time.h> #include <omp.h> #define TRUE 1 #define FALSE 0 #define INIT_CONSOLE() HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);\ CONSOLE_SCREEN_BUFFER_INFO consoleInfo;\ WORD saved_attributes;\ GetConsoleScreenBufferInfo(hConsole, &consoleInfo);\ saved_attributes = consoleInfo.wAttributes #define SET_COLOR(x) SetConsoleTextAttribute(hConsole, (x)) #define RESET_COLOR() SetConsoleTextAttribute(hConsole, saved_attributes) //struct untuk data yang berbentuk linked list typedef struct Node { char judul[50], deskripsi[1000], matkul[30]; int tanggal, bulan, tahun; // waktu deadline int progress; // dalam persen (0-100) struct Node *next; } Node; typedef Node* NodePtr; //function prototype void displayMenuAndTitle(char *title_string[], int title_size, char *menu_array[], int menu_count, int *menu_selected); void displayTitle(char *title_string[], int title_size); void displayMenu(char *menu_array[], int menu_count, int menu_selected); void displayMenuExit(void); int getInteger(int lower_bound, int upper_bound, char *message); char *getString(char *string, int size, char *message); char *lowercase(char *string); void findWords(char *string, char **words, int *word_count); void printNote(NodePtr notes, int start, int length, char *message, int selected); void printNoteDescription(NodePtr notes, int index); void deleteNote(NodePtr *notes, int index); void selectNote(NodePtr notes, int note_count, int note_selected); void displayNoteSelected(NodePtr notes, int *note_selected); int daysToDeadline(NodePtr notes); int deadlineCompare(Node note1, Node note2); void searchJudul(NodePtr notes, char 
*key); void searchMatkul(NodePtr notes, char *key); void sortNote(Node *destination, Node *source, int i_begin, int i_end, int mode); void mergeProgressAscending(Node *destination, Node *source, int i_begin, int i_middle, int i_end); void mergeProgressDescending(Node *destination, Node *source, int i_begin, int i_middle, int i_end); void mergeDeadlineAscending(Node *destination, Node *source, int i_begin, int i_middle, int i_end); void mergeDeadlineDescending(Node *destination, Node *source, int i_begin, int i_middle, int i_end); int emptyNoteError(NodePtr notes); void menuViewNote(NodePtr notes); void menuAddNote(NodePtr *notes); void menuDeleteNote(NodePtr *notes); void menuEditNote(NodePtr *notes); void menuSearchNote(NodePtr notes); void menuSortNote(NodePtr *notes); void menuImportNote(NodePtr *notes); void menuExportNote(NodePtr notes); int main(void) { NodePtr notes = NULL, current_ptr; int menu_selected = 0, note_is_saved = TRUE; omp_set_num_threads(1); char *title[] = { //tampilan menu "***********************************************************\n", "* *\n", "* TekNote *\n", "* Proyek Akhir Semester Kelompok 7 *\n", "* *\n", "***********************************************************\n" }; char *menu[] = { //pilihan menu "Keluar dari program", "Melihat note", "Menambahkan note", "Menghapus note", "Mengedit note", "Mencari note", "Mengurutkan note", "Mengimpor note", "Mengekspor note" }; // Program akan terus berjalan sampai user memilih opsi exit while (TRUE) { displayMenuAndTitle(title, 6, menu, 9, &menu_selected); switch (menu_selected) { case 0: // exit program if (note_is_saved == FALSE) { printf("Note belum tersimpan, tetap keluar program? 
(tekan y untuk konfirmasi)"); if ((getch() | 32) != 'y') break; } while (notes != NULL) { current_ptr = notes->next; free(notes); notes = current_ptr; } return 0; case 1://pilihan 1:melihat notes menuViewNote(notes); break; case 2://pilihan 2:menambahkan note menuAddNote(&notes); note_is_saved = FALSE; break; case 3://pilihan 3:menghapus note menuDeleteNote(&notes); note_is_saved = FALSE; break; case 4://pilihan 4:mengedit notes menuEditNote(&notes); note_is_saved = FALSE; break; case 5://pilihan 5:mencari notes menuSearchNote(notes); break; case 6://pilihan 6:menyortir notes menuSortNote(&notes); break; case 7://pilihan 7:mengimport notes menuImportNote(&notes); break; case 8://pilihan 8:mengeksport notes menuExportNote(notes); note_is_saved = TRUE; break; } } } void displayMenuAndTitle( //menampilkan menu dan judul char *title_string[], int title_size, char *menu_array[], int menu_count, int *menu_selected ) { char char_input; do { system("cls"); displayTitle(title_string, title_size); displayMenu(menu_array, menu_count, *menu_selected); printf("Gunakan W dan S untuk navigasi.\n"); printf("Tekan Enter untuk memilih menu. 
"); // Mengatur perpindahan menu yang dipilih char_input = getch(); if (char_input == 'w' || char_input == 'W') { if (*menu_selected <= 0) *menu_selected = menu_count - 1; else (*menu_selected)--; } else if (char_input == 's' || char_input == 'S') { if (*menu_selected >= menu_count - 1) *menu_selected = 0; else (*menu_selected)++; } } while (char_input != '\r'); system("cls"); } void displayTitle(char *title_string[], int title_size) { //menampilkan display untuk judul int i; INIT_CONSOLE(); SET_COLOR(BACKGROUND_BLUE | BACKGROUND_INTENSITY); for (i = 0; i < title_size; i++) printf(title_string[i]); RESET_COLOR(); } void displayMenu(char *menu_array[], int menu_count, int menu_selected) { //menampilkan display untuk menu int i; INIT_CONSOLE(); printf("\n-----------------------------------------------------------\n\n"); for (i = 0; i < menu_count; i++) { if (menu_selected == i) { SET_COLOR(FOREGROUND_BLUE | FOREGROUND_INTENSITY); printf("--> %s\n", menu_array[i]); RESET_COLOR(); } else printf(" %s\n", menu_array[i]); } printf("\n-----------------------------------------------------------\n\n"); } void displayMenuExit(void) { //menampilkan display exit menu printf("\n\npress any key to continue..."); fflush(stdin); getch(); system("cls"); } int getInteger(int lower_bound, int upper_bound, char *message) { //sebagai penerima input integer int integer, scanf_return; // Terus meminta integer sampai input user valid do { printf( "%s ( %d sampai dengan %d ): ", message, lower_bound, upper_bound ); // seharusnya scanf me-return nilai 1 jika input valid scanf_return = scanf("%d", &integer); fflush(stdin); // membersihkan input buffer if (scanf_return != 1) { printf("Input invalid!\n\n"); } else if (integer < lower_bound) { printf("Angka terlalu kecil!\n\n"); } else if (integer > upper_bound) { printf("Angka terlalu besar!\n\n"); } } while (scanf_return != 1 || integer < lower_bound || integer > upper_bound); return integer; } char *getString(char *string, int size, char 
*message) { //sebagai penerima input string int i, length; do { printf("%s: ", message); fgets(string, size, stdin); // memasukkan input ke string sscanf(string, "%[^\n]", string); // memformat string fflush(stdin); // membersihkan input buffer // memastikan string tidak kosong length = strlen(string); if (length <= 1 && string[0] == '\n') { printf("ERROR: input tidak boleh kosong!\n\n"); i = !length; continue; } // memastikan string tidak ada tanda koma for (i = 0; i < length; i++) { if (string[i] == ',') { printf("ERROR: input tidak boleh ada tanda koma (,)!\n\n"); break; } } } while (i != length); return string; } char *lowercase(char *string) { //sebagai pengonversi huruf besar menjadi kecil untuk menghindari bug program int i; for (i = 0; string[i] != '\0'; i++) string[i] = tolower(string[i]); return string; } void findWords(char *string, char **words, int *word_count) { // Mencari kata-kata pada string // words adalah pointer ke kata-kata yang ada pada string // word_count merupakan jumlah kata yang ada di words int i, j = 0, length = strlen(string), word_begin, mode = 0; *word_count = 0; for (i = 0; i < length + 1; i++) { // mode 0: mencari index pertama kata (word_begin) if (mode == 0 && string[i] != '\0' && string[i] != ' ') { word_begin = i; mode = 1; // mode 1: mencari akhir kata } else if (string[i] == '\0' || string[i] == ' ') { string[i] = '\0'; words[j] = &string[word_begin]; (*word_count)++; j++; mode = 0; } } } int daysToDeadline(NodePtr notes) { // menyimpan waktu sekarang untuk acuan waktu deadline time_t rawtime; struct tm *timeinfo; time(&rawtime); timeinfo = localtime(&rawtime); // menghitung selisih hari menuju deadline int selisih_tahun = timeinfo->tm_year + 1900 - notes->tahun; int selisih_bulan = timeinfo->tm_mon + 1 - notes->bulan; int selisih_tanggal = timeinfo->tm_mday - notes->tanggal; return (selisih_tahun * 365) + (selisih_bulan * 30) + selisih_tanggal; } int deadlineCompare(Node note1, Node note2) { // menghitung selisih hari antara 
deadline note1 dan deadline note2 untuk sorting int selisih_tahun = note1.tahun - note2.tahun; int selisih_bulan = note1.bulan - note2.bulan; int selisih_tanggal = note1.tanggal - note2.tanggal; return (selisih_tahun * 365) + (selisih_bulan * 30) + selisih_tanggal; } void printNote(NodePtr notes, int start, int length, char *message, int selected) { // menampilkan judul, matkul, deadline, dan progress // start adalah index pertama note yang ditampilkan // length adalah banyaknya notes yang ditampilkan int i; INIT_CONSOLE(); // Jika tidak ada message, maka bagian title dan header tidak akan ditampilkan if (strlen(message) != 0) { printf("\n%90s\n\n", message); printf("%50s" , "JUDUL"); printf("|%30s", "MATA KULIAH"); printf("|%14s", "DEADLINE"); printf("|%15s", "PROGRESS"); printf("|%7s" , "INDEX"); printf("\n"); } for (i = 1; i < start; i++) { notes = notes->next; } for (i = start; i < start + length; i++) { // menampilkan note berwarna sesuai urgency deadline dan progress if (i == selected) // note dipilih SET_COLOR(FOREGROUND_BLUE | FOREGROUND_INTENSITY); else if (notes->progress >= 100) // note sudah selesai SET_COLOR(FOREGROUND_GREEN | FOREGROUND_INTENSITY); else if (daysToDeadline(notes) >= -1) // deadline besok SET_COLOR(FOREGROUND_RED); else if (daysToDeadline(notes) >= -7) // deadline minggu depan SET_COLOR(FOREGROUND_RED | FOREGROUND_GREEN); printf("%50s" , notes->judul); printf("|%30s" , notes->matkul); printf("| %02d", notes->tanggal); printf("-%02d" , notes->bulan); printf("-%04d" , notes->tahun); printf("|%14d%%" , notes->progress); printf("|%7d" , i); printf("\n"); RESET_COLOR(); notes = notes->next; } } void selectNote(NodePtr notes, int note_count, int note_selected) { //memilih dan menampilkan detail notes INIT_CONSOLE(); printNote(notes, 1, note_count, "--- ALL NOTES ---", note_selected); if (note_selected == 0) SET_COLOR(FOREGROUND_BLUE | FOREGROUND_INTENSITY); printf("\nKeluar dari menu\n"); RESET_COLOR(); } void printNoteDescription(NodePtr 
notes, int index) { // Menampilkan suatu note dengan deskripsinya int i; INIT_CONSOLE(); // menampilkan deskripsi note berwarna sesuai urgency deadline dan progress if (notes->progress >= 100) // note sudah selesai SET_COLOR(FOREGROUND_GREEN | FOREGROUND_INTENSITY); else if (daysToDeadline(notes) >= -1) // deadline besok SET_COLOR(FOREGROUND_RED); else if (daysToDeadline(notes) >= -7) // deadline minggu depan SET_COLOR(FOREGROUND_RED | FOREGROUND_GREEN); for (i = 1; i < index; i++) { notes = notes->next; } printf("\n"); printf("JUDUL: %s\n" , notes->judul); printf("MATA KULIAH: %s\n", notes->matkul); printf( "DEADLINE: %02d-%02d-%04d\n", notes->tanggal, notes->bulan, notes->tahun ); printf("PROGRESS: %d%%\n", notes->progress); printf("DESKRIPSI: %s\n" , notes->deskripsi); RESET_COLOR(); } void deleteNote(NodePtr *notes, int index) { //menghapus note int i; NodePtr temp_ptr, previous_ptr, current_ptr; if (index == 1) { temp_ptr = *notes; *notes = (*notes)->next; free(temp_ptr); return; } current_ptr = *notes; for (i = 1; i < index; i++) { previous_ptr = current_ptr; current_ptr = current_ptr->next; } temp_ptr = current_ptr; previous_ptr->next = current_ptr->next; free(temp_ptr); } void searchJudul(NodePtr notes, char *key) { //mencari note berdasar judul int i, j, k, key_count = 0, judul_count = 0, found = FALSE, match; char *key_words[25], *judul_words[25], judul[50]; lowercase(key); findWords(key, key_words, &key_count); // linar search for (i = 1; notes != NULL; i++) { match = TRUE; strcpy(judul, notes->judul); lowercase(judul); findWords(judul, judul_words, &judul_count); for (j = 0; j < key_count; j++) { for (k = 0; k < judul_count; k++) { if (strcmp(key_words[j], judul_words[k]) == 0) { break; } } if (k == judul_count) { match = FALSE; break; } } if (match == TRUE && found == FALSE) { found = TRUE; printNote(notes, i, 1, "--- FOUND ---", -1); } else if (match == TRUE) { printNote(notes, i, 1, "", -1); } notes = notes->next; } if (found == FALSE) printf("NOT 
FOUND\n"); } void searchMatkul(NodePtr notes, char *key) { //mencari note berdasar matkul int i, j, k, key_count = 0, matkul_count = 0, found = FALSE, match; char *key_words[25], *matkul_words[25], matkul[50]; lowercase(key); findWords(key, key_words, &key_count); // linar search for (i = 1; notes != NULL; i++) { match = TRUE; strcpy(matkul, notes->matkul); lowercase(matkul); findWords(matkul, matkul_words, &matkul_count); for (j = 0; j < key_count; j++) { for (k = 0; k < matkul_count; k++) { if (strcmp(key_words[j], matkul_words[k]) == 0) { break; } } if (k == matkul_count) { match = FALSE; break; } } if (match == TRUE && found == FALSE) { found = TRUE; printNote(notes, i, 1, "--- FOUND ---", -1); } else if (match == TRUE) { printNote(notes, i, 1, "", -1); } notes = notes->next; } if (found == FALSE) printf("NOT FOUND\n"); } void sortNote(Node *destination, Node *source, int i_begin, int i_end, int mode) { //mensort notes if (i_end - i_begin <= 1) return; int i_middle = (i_begin + i_end) / 2; #pragma omp task sortNote(source, destination, i_begin, i_middle, mode); #pragma omp task sortNote(source, destination, i_middle, i_end, mode); #pragma omp taskwait switch (mode) { case 0: mergeDeadlineAscending(destination, source, i_begin, i_middle, i_end); break; case 1: mergeDeadlineDescending(destination, source, i_begin, i_middle, i_end); break; case 2: mergeProgressAscending(destination, source, i_begin, i_middle, i_end); break; case 3: mergeProgressDescending(destination, source, i_begin, i_middle, i_end); break; } } void mergeProgressAscending(Node *destination, Node *source, int i_begin, int i_middle, int i_end) { //mensort progress secara ascending int i = i_begin, j = i_middle, k; #pragma omp parallel for reduction(+:i) reduction(+:j) for (k = i_begin; k < i_end; k++) { if (i < i_middle && (j >= i_end || source[i].progress <= source[j].progress)) { destination[k] = source[i]; #pragma omp critical i++; } else { destination[k] = source[j]; #pragma omp critical j++; 
} } } void mergeProgressDescending(Node *destination, Node *source, int i_begin, int i_middle, int i_end) { //mensort progress secara descending int i = i_begin, j = i_middle, k; #pragma omp parallel for reduction(+:i) reduction(+:j) for (k = i_begin; k < i_end; k++) { if (i < i_middle && (j >= i_end || source[i].progress >= source[j].progress)) { destination[k] = source[i]; #pragma omp critical i++; } else { destination[k] = source[j]; #pragma omp critical j++; } } } void mergeDeadlineAscending(Node *destination, Node *source, int i_begin, int i_middle, int i_end) { //mensort deadline secara ascending int i = i_begin, j = i_middle, k; #pragma omp parallel for reduction(+:i) reduction(+:j) for (k = i_begin; k < i_end; k++) { if (i < i_middle && (j >= i_end || deadlineCompare(source[i], source[j]) <= 0)) { destination[k] = source[i]; #pragma omp critical i++; } else { destination[k] = source[j]; #pragma omp critical j++; } } } void mergeDeadlineDescending(Node *destination, Node *source, int i_begin, int i_middle, int i_end) { //mensort deadline secara descending int i = i_begin, j = i_middle, k; #pragma omp parallel for reduction(+:i) reduction(+:j) for (k = i_begin; k < i_end; k++) { if (i < i_middle && (j >= i_end || deadlineCompare(source[i], source[j]) >= 0)) { destination[k] = source[i]; #pragma omp critical i++; } else { destination[k] = source[j]; #pragma omp critical j++; } } } void displayNoteSelected(NodePtr notes, int *note_selected) { //menampilkan detail note yang dipilih NodePtr temp = notes; int note_count = 0; char char_input; while (temp != NULL) { note_count++; temp = temp->next; } do { system("cls"); selectNote(notes, note_count, *note_selected); char_input = getch(); if (char_input == 'w' || char_input == 'W') { if (*note_selected <= 0) *note_selected = note_count; else (*note_selected)--; } else if (char_input == 's' || char_input == 'S') { if (*note_selected >= note_count) *note_selected = 0; else (*note_selected)++; } } while (char_input != 
'\r'); system("cls"); } int emptyNoteError(NodePtr notes) { //menghindari error jika note kosong if (notes == NULL) { printf("ERROR: note kosong!\n"); displayMenuExit(); return 1; } return 0; } void menuViewNote(NodePtr notes) { //menampilkan detail note if (emptyNoteError(notes)) return; int note_selected = 0; while (TRUE) { displayNoteSelected(notes, &note_selected); if (note_selected <= 0) { return; } else { printNoteDescription(notes, note_selected); displayMenuExit(); } } } void menuAddNote(NodePtr *notes) { //menambahkan note char *title[] = { "***********************************************************\n", "* *\n", "* ADD MENU *\n", "* *\n", "***********************************************************\n" }; displayTitle(title, 5); NodePtr new_ptr = malloc(sizeof(Node)), current_ptr = *notes, previous_ptr; // Meminta user memasukkan data-data untuk satu note printf("\n-----------------------------------------------------------\n\n"); getString(new_ptr->judul, 50, "Masukkan judul"); getString(new_ptr->deskripsi, 1000, "Masukkan deskripsi"); getString(new_ptr->matkul, 30, "Masukkan mata kuliah"); new_ptr->tahun = getInteger(0, 9999, "Masukkan tahun deadline"); new_ptr->bulan = getInteger(1, 12, "Masukkan bulan deadline"); new_ptr->tanggal = getInteger(1, 31, "Masukkan tanggal deadline"); new_ptr->progress = getInteger(0, 100, "Masukkan persentase progress"); new_ptr->next = NULL; printf("\n-----------------------------------------------------------\n\n"); if (*notes == NULL) { *notes = new_ptr; return; } while (current_ptr != NULL) { previous_ptr = current_ptr; current_ptr = current_ptr->next; } previous_ptr->next = new_ptr; displayMenuExit(); } void menuDeleteNote(NodePtr *notes) { //menghapus note if (emptyNoteError(*notes)) return; int note_selected; while (TRUE) { note_selected = 0; displayNoteSelected(*notes, &note_selected); if (note_selected <= 0) { return; } else { printNote(*notes, note_selected, 1, "--- YANG AKAN DIHAPUS ---", -1); printf("\n\nApakah 
anda yakin untuk menghapus note tersebut? (tekan y untuk konfirmasi)"); if ((getch() | 32) == 'y') { deleteNote(notes, note_selected); system("cls"); printf("Note telah dihapus.\n"); displayMenuExit(); } } } } void menuEditNote(NodePtr *notes) { //mengedit note if (emptyNoteError(*notes)) return; int note_selected = 0; while (TRUE) { displayNoteSelected(*notes, &note_selected); if (note_selected <= 0) { return; } else { int i; NodePtr current_ptr = *notes; for (i = 1; i < note_selected; i++) { current_ptr = current_ptr->next; } // Meminta user memasukkan data-data untuk satu note printf("\n-----------------------------------------------------------\n\n"); getString(current_ptr->judul, 50, "Masukkan judul"); getString(current_ptr->deskripsi, 1000, "Masukkan deskripsi"); getString(current_ptr->matkul, 30, "Masukkan mata kuliah"); current_ptr->tahun = getInteger(0, 9999, "Masukkan tahun deadline"); current_ptr->bulan = getInteger(1, 12, "Masukkan bulan deadline"); current_ptr->tanggal = getInteger(1, 31, "Masukkan tanggal deadline"); current_ptr->progress = getInteger(0, 100, "Masukkan persentase progress"); printf("\n-----------------------------------------------------------\n\n"); displayMenuExit(); } } } void menuSearchNote(NodePtr notes) { //mencari note if (emptyNoteError(notes)) return; char *title[] = { "***********************************************************\n", "* *\n", "* SEARCH MENU *\n", "* *\n", "***********************************************************\n" }; char *menu[] = { "Keluar dari menu", "Mencari judul", "Mencari mata kuliah" }; int menu_selected = 0; while (TRUE) { char key[50]; // string yang dicari displayMenuAndTitle(title, 5, menu, 3, &menu_selected); switch (menu_selected) { case 0: return; case 1: getString(key, 50, "Masukkan judul yang ingin dicari"); searchJudul(notes, key); displayMenuExit(); break; case 2: getString(key, 30, "Masukkan mata kuliah yang ingin dicari"); searchMatkul(notes, key); displayMenuExit(); break; } } } void 
menuSortNote(NodePtr *notes) { //mensort notes if (emptyNoteError(*notes)) return; int i, note_count = 0, menu_selected = 0; NodePtr temp; char *title[] = { "***********************************************************\n", "* *\n", "* SORT MENU *\n", "* *\n", "***********************************************************\n" }; char *menu[] = { "Keluar dari menu", "Mengurutkan berdasarkan deadline (ascending)", "Mengurutkan berdasarkan deadline (descending)", "Mengurutkan berdasarkan progress (ascending)", "Mengurutkan berdasarkan progress (descending)" }; temp = *notes; while (temp != NULL) { note_count++; temp = temp->next; } // Array untuk merge sort Node *note_array = malloc(sizeof(Node) * note_count), *sort_array = malloc(sizeof(Node) * note_count); temp = *notes; for (i = 1; i <= note_count; i++) { strcpy(note_array[i].judul, temp->judul); strcpy(note_array[i].deskripsi, temp->deskripsi); strcpy(note_array[i].matkul, temp->matkul); note_array[i].tahun = temp->tahun; note_array[i].bulan = temp->bulan; note_array[i].tanggal = temp->tanggal; note_array[i].progress = temp->progress; temp = temp->next; } while (TRUE) { displayMenuAndTitle(title, 5, menu, 5, &menu_selected); for(i = 1; i <= note_count; i++) { sort_array[i] = note_array[i]; } switch (menu_selected) { case 0: free(note_array); free(sort_array); return; case 1: #pragma omp parallel { #pragma omp single sortNote(note_array, sort_array, 1, note_count + 1, 0); temp = *notes; for (i = 1; i <= note_count; i++) { strcpy(temp->judul, note_array[i].judul); strcpy(temp->deskripsi, note_array[i].deskripsi); strcpy(temp->matkul, note_array[i].matkul); temp->tahun = note_array[i].tahun; temp->bulan = note_array[i].bulan; temp->tanggal = note_array[i].tanggal; temp->progress = note_array[i].progress; temp = temp->next; } #pragma omp barrier printNote(*notes, 1, note_count, "--- DEADLINE ASCENDING ---", -1); displayMenuExit(); } break; case 2: #pragma omp parallel { #pragma omp single sortNote(note_array, sort_array, 
1, note_count + 1, 1); temp = *notes; for (i = 1; i <= note_count; i++) { strcpy(temp->judul, note_array[i].judul); strcpy(temp->deskripsi, note_array[i].deskripsi); strcpy(temp->matkul, note_array[i].matkul); temp->tahun = note_array[i].tahun; temp->bulan = note_array[i].bulan; temp->tanggal = note_array[i].tanggal; temp->progress = note_array[i].progress; temp = temp->next; } #pragma omp barrier printNote(*notes, 1, note_count, "--- DEADLINE DESCENDING ---", -1); displayMenuExit(); } break; case 3: #pragma omp parallel { #pragma omp single sortNote(note_array, sort_array, 1, note_count + 1, 2); temp = *notes; for (i = 1; i <= note_count; i++) { strcpy(temp->judul, note_array[i].judul); strcpy(temp->deskripsi, note_array[i].deskripsi); strcpy(temp->matkul, note_array[i].matkul); temp->tahun = note_array[i].tahun; temp->bulan = note_array[i].bulan; temp->tanggal = note_array[i].tanggal; temp->progress = note_array[i].progress; temp = temp->next; } #pragma omp barrier printNote(*notes, 1, note_count, "--- PROGRESS ASCENDING ---", -1); displayMenuExit(); } break; case 4: #pragma omp parallel { #pragma omp single sortNote(note_array, sort_array, 1, note_count + 1, 3); temp = *notes; for (i = 1; i <= note_count; i++) { strcpy(temp->judul, note_array[i].judul); strcpy(temp->deskripsi, note_array[i].deskripsi); strcpy(temp->matkul, note_array[i].matkul); temp->tahun = note_array[i].tahun; temp->bulan = note_array[i].bulan; temp->tanggal = note_array[i].tanggal; temp->progress = note_array[i].progress; temp = temp->next; } #pragma omp barrier printNote(*notes, 1, note_count, "--- PROGRESS DESCENDING ---", -1); displayMenuExit(); } break; } } } void menuImportNote(NodePtr *notes) { //mengimpor notes char file_name[64]; char *title[] = { "***********************************************************\n", "* *\n", "* IMPORT MENU *\n", "* *\n", "***********************************************************\n" }; NodePtr new_ptr, current_ptr; displayTitle(title, 5); 
printf("\n-----------------------------------------------------------\n\n"); getString(file_name, 64, "Masukkan nama file (tanpa ekstensi)"); strcat(file_name, ".teknote"); FILE *fptr = fopen(file_name, "r"); if (fptr == NULL) { printf("ERROR: tidak dapat membuka file!\n"); displayMenuExit(); return; } int i; char buff[2048]; if (fgets(buff, 2048, fptr) == NULL) { printf("ERROR: file kosong!\n"); displayMenuExit(); return; } if (strcmp(buff, "JUDUL,DESKRIPSI,MATA KULIAH,TANGGAL,BULAN,TAHUN,PROGRESS\n") != 0) { printf("ERROR: format file tidak sesuai!\n"); displayMenuExit(); return; } while (*notes != NULL){ current_ptr = (*notes)->next; free(*notes); *notes = current_ptr; } while (fgets(buff, 2048, fptr) != NULL) { new_ptr = malloc(sizeof(Node)); sscanf( buff, "%[^,],%[^,],%[^,],%d,%d,%d,%d", &new_ptr->judul, &new_ptr->deskripsi, &new_ptr->matkul, &new_ptr->tanggal, &new_ptr->bulan, &new_ptr->tahun, &new_ptr->progress ); new_ptr->next = NULL; if (*notes == NULL) { *notes = new_ptr; current_ptr = *notes; continue; } current_ptr->next = new_ptr; current_ptr = current_ptr->next; } fclose(fptr); printf("Import berhasil!\n"); printf("\n-----------------------------------------------------------\n\n"); displayMenuExit(); } void menuExportNote(NodePtr notes) { //mengekspor notes if (emptyNoteError(notes)) return; int i; char file_name[64]; char *title[] = { "***********************************************************\n", "* *\n", "* EXPORT MENU *\n", "* *\n", "***********************************************************\n" }; displayTitle(title, 5); printf("\n-----------------------------------------------------------\n\n"); getString(file_name, 64, "Masukkan nama file (tanpa ekstensi)"); strcat(file_name, ".teknote"); FILE *fptr = fopen(file_name, "w"); if (fptr == NULL) { printf("Error: creating file failed\n"); displayMenuExit(); return; } fprintf(fptr, "JUDUL,DESKRIPSI,MATA KULIAH,TANGGAL,BULAN,TAHUN,PROGRESS\n"); for (; notes != NULL; notes = notes->next) { fprintf( 
fptr, "%s,%s,%s,%d,%d,%d,%d\n", notes->judul, notes->deskripsi, notes->matkul, notes->tanggal, notes->bulan, notes->tahun, notes->progress ); } fclose(fptr); printf("Export berhasil!\n"); printf("\n-----------------------------------------------------------\n\n"); displayMenuExit(); }
cs6868-ocean.c
/*
 * =====================================================================================
 *
 *       Filename:  simulate.c
 *
 *    Description:  Code to simulate Ocean currents.
 *
 *        Version:  1.0
 *        Created:  03/03/2018 09:59:42 IST
 *       Revision:  none
 *       Compiler:  gcc
 *
 *         Author:  Krishna A, and students of CS6868.
 *
 * =====================================================================================
 */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <stdio.h>
/* NOTE(review): <stdlib.h> and <string.h> are included twice above/below;
 * harmless (header guards) but worth deduplicating. */
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

/* Return the smaller of two ints (ties favor a). */
int min(int a, int b)
{
    return a <= b ? a : b;
}

/* Serial Jacobi-style relaxation: repeatedly replace each interior cell with
 * the 5-point average of itself and its 4 neighbors, double-buffered via B,
 * until the mean absolute change per cell drops below tol.
 * Returns the number of iterations performed.
 *
 * NOTE(review): B (array-of-rows) is malloc'd here and never freed — leak.
 * NOTE(review): A and B pointers are swapped each iteration, so after an odd
 * number of iterations the newest values live in the locally-allocated rows,
 * not in the caller's original rows; the caller's array can be one iteration
 * stale.  Consider copying back (or freeing the losing buffer) before return. */
int simulate_ocean_currents(double **A, int n, double tol){
    int done = 0;
    double diff;
    double old;
    int iter = 0;
    double **B, **C;
    B = (double **) malloc(n*sizeof(double*));
    for (int k = 0; k < n; k++){
        B[k]=(double *) malloc(n*sizeof(double));
        memcpy(B[k], A[k], n*sizeof(double));   /* seed B so borders match A */
    }
    while (!done){
        iter ++;
        diff = 0; /* init */
        for (int i=1;i<n-1; ++i){ /* skip border elems */
            for (int j=1; j<n-1; ++j){ /* skip border elems */
                old = A[i][j];
                B[i][j] = (A[i][j] + A[i][j-1] + A[i-1][j] + A[i][j+1] + A[i+1][j])/5.0; /*average */
                diff += fabs(B[i][j] - old);
            }
        }
        C = A; A = B; B = C; // exchange.
        if (diff/(n*n) < tol)
            done = 1;
    }
    return iter;
}

/* Parallel version of the relaxation above, using `procs` OpenMP threads.
 * Rows 1..dim-2 are statically partitioned in contiguous chunks per thread;
 * per-thread `mydiff` sums are combined into the shared `diff` with an
 * atomic, and barriers separate the phases (zeroing diff, accumulating,
 * reading diff, swapping buffers).  `done` is firstprivate, so each thread
 * re-evaluates the convergence test itself from the shared diff.
 *
 * NOTE(review): same B leak and same "result may be in the swapped buffer"
 * issue as the serial version.
 * NOTE(review): `diff = 0;` is executed by every thread (not inside the
 * single); the writes are redundant but ordered by the barriers. */
int simulate_ocean_currents_parallel(double **A, int dim, double tol, int procs){
    int done = 0, iter = 0;
    double diff = 0;
    double **B, **C;
    B = (double **) malloc(dim*sizeof(double *));
    /* First parallel region: each thread allocates and seeds its slice of B. */
    #pragma omp parallel num_threads(procs) shared(A, B, dim)
    {
        int tid = omp_get_thread_num();
        int start = min(dim, tid*dim/procs);
        int end = min(dim, (tid + 1)*dim/procs);
        for (int i = start; i < end; ++i) {
            B[i] = (double *) malloc (dim*sizeof(double));
            memcpy(B[i], A[i], dim*sizeof(double));
        }
    }
    /* Interior rows (dim-2 of them) split into per-thread chunks. */
    int chunk = 1 + (dim-3)/procs;
    #pragma omp parallel num_threads(procs) firstprivate(done)
    {
        int tid = omp_get_thread_num();
        int start = 1 + min(dim - 2, tid*chunk);
        int end = 1 + min(dim - 2, (tid+1)*chunk);
        double old, mydiff;
        while (!done) {
            #pragma omp single
            iter++;
            diff = 0;
            #pragma omp barrier
            mydiff = 0;
            for (int i = start; i < end; ++i) {
                for (int j = 1; j < dim-1; ++j) {
                    old = A[i][j];
                    B[i][j] = (A[i][j] + A[i][j-1] + A[i-1][j] + A[i][j+1] + A[i+1][j])/5.0;
                    mydiff += fabs(B[i][j] - old);
                }
            }
            #pragma omp atomic
            diff += mydiff;
            #pragma omp barrier
            done = diff/(dim*dim) < tol;
            #pragma omp single
            {
                C = A; A = B; B = C;   /* swap buffers; implicit barrier here */
            }
        }
    }
    return iter;
}

/* read input from the standard input, after allocating the array */
/* Allocates an n-by-n grid (row pointers + rows) and fills it from stdin.
 * NOTE(review): scanf return values and malloc results are unchecked. */
double ** read_input (int n){
    double **X;
    X = (double **)malloc(n*sizeof(double*));
    for (int i=0;i<n;++i){
        X[i]=(double *)malloc(n*sizeof(double));
        for (int j=0;j<n;++j)
            scanf("%lf",&X[i][j]);
    }
    return X;
}

/* output the final grid.
*/ void print_output(double **A, int n, int niter){ printf("Number of iterations = %d\n", niter); for (int i=0;i<n;++i){ for (int j=0;j<n;++j) printf("%lf ",A[i][j]); printf("\n"); } printf("\n"); } /* Print the time statistics */ void print_statistics(struct timeval start_time,struct timeval end_time) { printf("Start time:\t%lf \n", start_time.tv_sec+(start_time.tv_usec/1000000.0)); printf("End time:\t%lf\n", end_time.tv_sec+(end_time.tv_usec/1000000.0)); printf("Total time: \t%lf (s)\n", end_time.tv_sec - start_time.tv_sec + ((end_time.tv_usec - start_time.tv_usec)/1000000.0)); } /* Error in command line arguments. Print usage and exit. */ void print_usage_and_exit(char *prog){ fprintf(stderr, "Usage: %s <nprocs> <tol> <-serial|-parallel>\n", prog); exit(1); } int main(int argc, char **argv){ struct timeval start_time, end_time; int num_iter = 0; double tol; double **A; int procs; int dim; if (argc != 4){ print_usage_and_exit(argv[0]); } sscanf(argv[1],"%d",&procs); sscanf(argv[2],"%lf",&tol); char *option = argv[3]; if (option == NULL || (strcmp(option,"-serial") != 0 && strcmp(option,"-parallel") != 0 )) print_usage_and_exit(argv[0]); printf("Options: Procs = %d, Tol = %lf, Execution%s\n\n",procs, tol, option); // printf("Dimensions = "); scanf("%d", &dim); A = read_input(dim); // Calculate start time gettimeofday(&start_time, NULL); if (strcmp(option,"-serial") == 0) num_iter=simulate_ocean_currents(A, dim, tol); else num_iter=simulate_ocean_currents_parallel(A, dim, tol, procs); // Calculate end time gettimeofday(&end_time, NULL); // Print Statistics print_output(A, dim, num_iter); print_statistics(start_time,end_time); }
GB_unop__frexpx_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__frexpx_fp32_fp32)
// op(A') function:  GB (_unop_tran__frexpx_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_frexpxf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_frexpxf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = GB_frexpxf (z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FREXPX || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__frexpx_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: apply the op to all anz entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__frexpx_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache);
}

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if needed) the nullability record for \p file.
  /// The most recently accessed entry is kept in the single-element cache and
  /// only written back to the map when a different file is requested, so the
  /// returned reference is repointed by a later lookup of another file.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType if
  /// \p Tok is not the token the builder was primed for.  A stored type takes
  /// precedence over the lazy ComputeType callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // Maybe-ODR-used expressions saved from the enclosing context.
  // NOTE(review): semantics inferred from the name; confirm against the
  // push/pop code in SemaExpr.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  // Expressions involving 'noderef' pointers whose dereference status is
  // still pending; presumably consumed by WarnOnPendingNoDerefs (declared
  // below) — confirm.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  /// True for the three unevaluated-operand contexts (plain, list,
  /// and abstract); DiscardedStatement is deliberately excluded.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// A deleted method is recorded as NoMemberOrDeleted, not Success.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
/// the folding-set ID it was created with.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

// Bump-pointer arena for Sema-owned allocations.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. 
enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. 
class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. 
class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. 
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

/// Identifies which part of a translation unit a declaration belongs to,
/// for C++20 modules purposes.
enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,

  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,

  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

// Function/block/lambda/captured-region scope management.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing.  Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K);

void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     const BlockExpr *blkExpr = nullptr);

/// Retrieve the innermost function scope, or null outside any function.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads: normalize values of many types into something a
// SemaDiagnosticBuilder can stream via operator<<.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

/// A TypeDiagnoser that binds a diagnostic ID plus a pack of extra
/// arguments (held by reference) and streams them, followed by the type,
/// when diagnose() is invoked.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

/// State for one module currently being parsed (see ModuleScopes).
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

// The set of modules currently visible (per VisibleModuleSet).
VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. 
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

/// Determine whether \p FD is a usual deallocation function (C++
/// operator delete semantics).
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// Determine whether \p T is complete at \p Loc, without emitting a
/// diagnostic.
bool isCompleteType(SourceLocation Loc, QualType T) {
  // Passing a null diagnoser suppresses diagnostics; Impl returns true on
  // an incomplete type.
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

/// Variadic convenience wrapper: bundles \p DiagID and its arguments into a
/// BoundTypeDiagnoser and forwards to the diagnoser-based overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

/// Variadic convenience wrapper for RequireCompleteExprType (see above).
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Variadic convenience wrapper for RequireLiteralType (see above).
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Records whether a (re)definition body can be skipped, and the
/// declarations to compare when checking sameness.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;            // skip parsing the body entirely
  bool CheckSameAsPrevious;   // parse, but verify it matches Previous
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

/// Look up \p II as a type name in scope \p S; the many flags refine the
/// lookup for class names, constructor/destructor names, and class template
/// argument deduction contexts.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// Tagged-union result of ClassifyName(): exactly one of the payload
/// members (Expr, Template, Type) is meaningful, selected by Kind.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;          // valid when Kind == NC_Expression
  TemplateName Template;    // valid for the *Template kinds
  ParsedType Type;          // valid when Kind == NC_Type

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // The keyword itself is not stored; only the classification matters.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the template classification kinds onto TemplateNameKind; asserts
  /// on any non-template classification.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
/// Returns true if \p E could plausibly have been meant as a template-name
/// (C++ only); \p Dependent is set when the answer depends on a dependent
/// scope.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible only if no explicit template
  // arguments were already written.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation(),
                          SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is
/// about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
/// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. 
/// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. 
/// Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Delegates to the static overload using this Sema's context/preprocessor.
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS,
               IdentifierInfo *Name, SourceLocation NameLoc,
               const ParsedAttributesView &Attr, AccessSpecifier AS,
               SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo,
                          RecordDecl *Record, SourceLocation Loc,
                          bool Mutable, Expr *BitfieldWidth,
                          InClassInitStyle InitStyle,
                          SourceLocation TSSL,
                          AccessSpecifier AS, NamedDecl *PrevDecl,
                          Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,

  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); 
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType 
ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). 
virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. 
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                        const ObjCImplementationDecl *ImplD,
                                        const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

/// Strategy used when comparing two Objective-C method declarations
/// (see MatchTwoMethodDeclarations).
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first; if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Wrapper holding an expression that has been processed as a "full
  /// expression" (see MakeFullExpr below, which is the only producer of
  /// non-null instances).
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() { return E; }

    Expr *get() const { return E; }

    Expr *operator->() { return E; }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  /// Build a FullExprArg for \p Arg, defaulting the diagnostic location to
  /// the expression's own location.
  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  /// Same as MakeFullExpr, but finishes the expression as a discarded-value
  /// expression.
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  /// Calls ActOnStartOfCompoundStmt on construction and
  /// ActOnFinishOfCompoundStmt on destruction.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult 
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. 
/// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. 
/// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult 
BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); 
ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope 
*S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType 
BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr,
                                Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                             bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
// NOTE(review): this comment and the one below were previously swapped
// ("copy constructor" was documented here); a default constructor takes no
// parameter, so the "whether the parameter will be const" text belongs to
// the copy-constructor variant.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. 
AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. 
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. 
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg 
TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. 
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( 
ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. 
UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression.
  UPPC_Block
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                     UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);

/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                     TemplateName Template,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                     UnexpandedParameterPackContext UPPC);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
                 SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                          SourceLocation EllipsisLoc);

/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                   SourceLocation EllipsisLoc,
                                   Optional<unsigned> NumExpansions);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
                            SourceLocation EllipsisLoc,
                            Optional<unsigned> NumExpansions);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
                                     SourceRange PatternRange,
                           ArrayRef<UnexpandedParameterPack> Unexpanded,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                                     bool &ShouldExpand,
                                     bool &RetainExpansion,
                                     Optional<unsigned> &NumExpansions);

/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
    const MultiLevelTemplateArgumentList &TemplateArgs);

/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);

/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
    TemplateArgumentLoc OrigLoc,
    SourceLocation &Ellipsis,
    Optional<unsigned> &NumExpansions) const;

/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,

  /// The declaration was invalid; do nothing.
  TDK_Invalid,

  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,

  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,

  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,

  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,

  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,

  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,

  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,

  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,

  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,

  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,

  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,

  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,

  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,

  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,

  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam),
        ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {}

  QualType OriginalParamType;
  bool DecomposedParam;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

TemplateDeductionResult FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    sema::TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
    bool PartialOverloading = false,
    llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

TemplateDeductionResult DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
    bool PartialOverloading,
    llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                    SourceLocation Loc);

QualType DeduceTemplateSpecializationFromInitializer(
    TypeSourceInfo *TInfo, const InitializedEntity &Entity,
    const InitializationKind &Kind, MultiExprArg Init);

QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                      QualType Type, TypeSourceInfo *TSI,
                                      SourceRange Range, bool DirectInit,
                                      Expr *Init);

TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, AutoType *AT);

FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
                                                 FunctionTemplateDecl *FT2,
                                                 SourceLocation Loc,
                                         TemplatePartialOrderingContext TPOC,
                                                 unsigned NumCallArguments1,
                                                 unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc,
                   const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
                                ClassTemplatePartialSpecializationDecl *PS1,
                                ClassTemplatePartialSpecializationDecl *PS2,
                                SourceLocation Loc);

bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);

void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced,
                                unsigned Depth,
                                llvm::SmallBitVector &Used);
// Convenience overload: forwards to the static variant using this Sema's
// ASTContext.
void MarkDeducedTemplateParameters(
                                const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
                             const TemplateArgumentList *Innermost = nullptr,
                             bool RelativeToPrimary = false,
                             const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    // NumTemplateArgs shares the union with SpecialMember, so it is only
    // meaningful when this context is not DeclaringSpecialMember.
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  // Swap in the new index; the previous value is restored on destruction.
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common implementation that every public constructor delegates to.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  // Only reprint the stack when its depth changed since the last emission,
  // to avoid flooding the user with identical context notes.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // Saved Sema state, restored verbatim by the destructor.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  // Snapshots the SFINAE-related Sema state; if we are not already in a
  // SFINAE context, marks this as a non-instantiation SFINAE context.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  // Restores every piece of state captured in the constructor, discarding
  // any SFINAE errors recorded while the trap was active.
  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  // Enters a SFINAE trap (with access checking) and disables typo
  // correction for the duration of the scope.
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
// Presumably a shared cache for the thread-safety analysis
// (threadSafety::BeforeSet) — see AnalysisBasedWarnings; confirm ownership.
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

// RAII scope that, when enabled, saves the global pending-instantiation and
// vtable-use queues on entry and restores them on exit; perform() flushes
// whatever accumulated inside the scope. When disabled, it is a no-op.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  // Defines any used vtables and performs the pending instantiations that
  // were queued while this scope was active.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  // The destructor expects perform() to have drained both queues (asserted
  // below) before the saved queues are swapped back in.
  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

// Local-scope counterpart of GlobalEagerInstantiationScope: saves and
// restores only PendingLocalImplicitInstantiations (always enabled).
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  // perform() must have drained the local queue before destruction.
  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // Whether any entry differs from the default-constructed value; when
  // false, getPointerOrNull() can report "nothing interesting".
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order; intervening
  /// parameters are filled with default-constructed infos by resize().
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. 
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

/// AddParameterABIAttr - Adds a parameter ABI attribute to a particular
/// declaration.
void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

/// Ownership kinds for the *_consumed family of attributes
/// (NS = Foundation, CF = CoreFoundation, OS = libkern/os objects).
enum class RetainOwnershipKind {NS, CF, OS};

/// AddXConsumedAttr - Adds an ns_consumed/cf_consumed/os_consumed attribute
/// (selected by \p K) to a particular declaration.
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                                    Expr *Max, unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. 
void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                               VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                      TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                              Scope *S, QualType MapperType,
                                              SourceLocation StartLoc,
                                              DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                     ArrayRef<OMPClause *> ClauseList);

/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                  const DeclarationNameInfo &Id,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
  return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
         isInOpenMPTargetExecutionDirective();
}

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. 
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. 
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId,
                    const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true for the explicitly-written cast kinds (C-style,
/// functional, and "other" casts); false for implicit conversions and
/// conversions for builtin overloaded operators.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
/// In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collect argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found.
// The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. 
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The result of analyzing a condition expression (from 'if', 'while',
/// 'for', 'do', or 'switch'): bundles the optional condition variable,
/// the converted condition expression, and — for constexpr conditions —
/// the compile-time-known boolean value.
class ConditionResult {
  Decl *ConditionVar;    // Declared condition variable, or null.
  FullExprArg Condition; // The converted condition expression.
  bool Invalid;          // True if condition analysis failed.
  bool HasKnownValue;    // True if the value is known at compile time.
  bool KnownValue;       // The known value; meaningful only if HasKnownValue.

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        // Only constexpr conditions whose expression is not value-dependent
        // can be evaluated here; note KnownValue reads HasKnownValue, so the
        // member initialization order matters.
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }
  // Returns the condition variable (may be null) and the condition expr.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  // Returns the compile-time value for constexpr conditions, None otherwise.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
/// Abstract callback used to customize the diagnostics emitted when an
/// expression is required to be an integer constant expression (ICE) but is
/// not one, or is one only after constant folding.
class VerifyICEDiagnoser {
public:
  bool Suppress; // When true, callers should emit no diagnostics at all.

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Called when the expression is not an ICE.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Called when the expression is an ICE only after constant folding.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
// Nesting depth of the pragma that forces functions to be
// __host__ __device__; see Push/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  /// Selects how (and whether) the diagnostic is emitted.
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    // Route the streamed value either to the immediate diagnostic or to the
    // deferred partial diagnostic stored on the function, whichever exists.
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  // Anything better than CFP_Never is (at least conditionally) allowed.
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, 
DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation 
getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; }

/// Bump the Microsoft mangling number counter on the current scope.
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

/// The lexical DeclContext we are currently in, preferring the recorded
/// original lexical context over CurContext when one is set.
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

/// Like getCurLexicalContext(), but an Objective-C category is mapped to
/// its class interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
/// RAII helper that stashes away (and later restores) the lists of pending
/// parsed-class state, so that a nested parse starts with clean lists.
/// Asserts in the destructor that the nested parse left nothing pending.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedDllExportClasses.empty() &&
           "there shouldn't be any pending delayed DLL export classes");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;
  decltype(DelayedDllExportClasses) SavedDllExportClasses;

  /// Swaps the saved lists with Sema's live lists; called from both the
  /// constructor (save) and the destructor (restore).
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
    SavedDllExportClasses.swap(S.DelayedDllExportClasses);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Identity is based solely on the expression.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored, used
  /// for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops what the constructor pushed.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys piggyback on the FunctionDecl pointer's keys with a
  // default-constructed (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
IF97_Region3bw.c
// Copyright Martin Lord 2014-2015. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // IAPWS-IF97 Region 3 Backwards Equations: low temperature supercritical region /* ********************************************************************* * ******* VALIDITY ************ * 623.15 K <=T <= T ( p ) [B23 temperature equation] * p ( T ) [B23 temperature equation] <= p <= 100 MPa . * * * ****************************************************************** */ /* ******************************************************************** * COMPILE AND LINK INSTRUCTIONS (gcc) * * * This library uses math.h, so must have the -lm link flag * * The library is programmed to be able to use OpenMP multithreading * use the -fopenmp compile flag to enable multithreadded code * * ****************************************************************** */ #include "IF97_common.h" //PSTAR TSTAR & sqr #include "IF97_Region4.h" // saturation line used in determining subregion #include "IF97_B23.h" // not strictly required but used in v(p,t) subregion selector as an error detector #include <math.h> // for pow, log /* #ifdef _OPENMP // multithreading via libgomp # include <omp.h> #endif */ #include <stdio.h> //used for debugging only typedef struct sctR3RedCoefs { double vStar; double pStar; double tStar; int N; double a; double b; double c; double d; double e; } typR3RedCoefs; /* ********************************************************** ********* REGION 3 BACKWARDS EQUATIIONS v(p,t) ************** * * Revised Supplementary Release on Backward Equations for Specific Volume * as a Function of Pressure and Temperature v(p,T) for Region 3 of the * IAPWS Industrial Formulation 1997 for the Thermodynamic Properties * of Water and Steam. * * * http://iapws.org/relguide/Supp-VPT3-2014.pdf */ // The following sets of coefficients from table 1. 
// Subregion-boundary coefficient tables (table 1 of the supplementary
// release). Index 0 is a placeholder: the release numbers coefficients
// from 1, and the loops below start at i = 1.
const typIF97Coeffs_Jn T3AB_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.154793642129415e04}
	,{1,	-0.187661219490113e03}
	,{2,	 0.213144632222113e02}
	,{-1,	-0.191887498864292e04}
	,{-2,	 0.918419702359447e03}
};

const typIF97Coeffs_Jn T3CD_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.585276966696349e03}
	,{1,	 0.278233532206915e01}
	,{2,	-0.127283549295878e-1}
	,{3,	 0.159090746562729e-3}
};

const typIF97Coeffs_Jn T3GH_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	-0.249284240900418e05}
	,{1,	 0.428143584791546e04}
	,{2,	-0.269029173140130e03}
	,{3,	 0.751608051114157e01}
	,{4,	-0.787105249910383e-1}
};

const typIF97Coeffs_Jn T3IJ_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.584814781649163e03}
	,{1,	-0.616179320924617e00}
	,{2,	 0.260763050899562e00}
	,{3,	-0.587071076864459e-2}
	,{4,	 0.515308185433082e-4}
};

const typIF97Coeffs_Jn T3JK_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.617229772068439e03}
	,{1,	-0.770600270141675e01}
	,{2,	 0.697072596851896e00}
	,{3,	-0.157391839848015e-1}
	,{4,	 0.137897492684194e-3}
};

const typIF97Coeffs_Jn T3MN_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.535339483742384e03}
	,{1,	 0.761978122720128e01}
	,{2,	-0.158365725441648e00}
	,{3,	 0.192871054508108e-2}
};

const typIF97Coeffs_Jn T3OP_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.969461372400213e03}
	,{1,	-0.332500170441278e03}
	,{2,	 0.642859598466067e02}
	,{-1,	 0.773845935768222e03}
	,{-2,	-0.152313732937084e04}
};

const typIF97Coeffs_Jn T3QU_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.565603648239126e03}
	,{1,	 0.529062258221122e01}
	,{2,	-0.102020639611016e00}
	,{3,	 0.122240301070145e-2}
};

const typIF97Coeffs_Jn T3RX_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.584561202520006e03}
	,{1,	-0.102961025163669e01}
	,{2,	 0.243293362700452e00}
	,{3,	-0.294905044740799e-2}
};

const typIF97Coeffs_Jn T3UV_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.528199646263062e3}
	,{1,	 0.890579602135307e1}
	,{2,	-.222814134903755}
	,{3,	 0.286791682263697e-2}
};

const typIF97Coeffs_Jn T3WX_P_R3_COEFFS[] = {
	 {0,	 0.0}  // 0  i starts at 1, so 0th i is not used
	,{0,	 0.728052609145380e1}
	,{1,	 0.973505869861952e2}
	,{2,	 0.147370491183191e2}
	,{-1,	 0.329196213998375e3}
	,{-2,	 0.873371668682417e3}
};

// Region 3a/3b boundary T(p). See equation 2 (logarithmic form):
// critical isentrope from 25 MPa to 100 MPa.
double if97_r3ab_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3AB_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += (double)T3AB_P_R3_COEFFS[i].ni * pow( log(p_MPa), T3AB_P_R3_COEFFS[i].Ji );
	}
	return dblPhiSum;
}

// Region 3c/3d boundary T(p). See equation 1 (polynomial form). Valid: 25 - 40 MPa
double if97_r3cd_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3CD_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3CD_P_R3_COEFFS[i].ni * pow( p_MPa, T3CD_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3e/3f boundary T(p). See equation 3 (linear in p). Valid 22.5 - 40 MPa
double if97_r3ef_p_t (double p_MPa){
	const double DPhiDPi = 3.727888004;  // slope dT/dp at the critical point
	return DPhiDPi * (p_MPa- 22.064) + 647.096;
}

// Region 3g/3h boundary T(p). See equation 1. Valid 22.5 - 25 MPa
double if97_r3gh_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3GH_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3GH_P_R3_COEFFS[i].ni * pow( p_MPa, T3GH_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3i/3j boundary T(p). See equation 1. Valid 22.5 - 25 MPa  ~v= 0.0041 m3/kg
double if97_r3ij_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	// BUGFIX: the loop bound previously used sizeof(T3GH_P_R3_COEFFS) (copy-paste
	// from if97_r3gh_p_t). Both tables happen to hold 6 entries today, so results
	// were unaffected, but the bound now matches the table actually summed.
	for (i=1; i <= (int)(sizeof(T3IJ_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3IJ_P_R3_COEFFS[i].ni * pow( p_MPa, T3IJ_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3j/3k boundary T(p). See equation 1. Valid 20.5 - 25 MPa.  ~ v = v"(20.5 MPa)
double if97_r3jk_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3JK_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3JK_P_R3_COEFFS[i].ni * pow( p_MPa, T3JK_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3m/3n boundary T(p). See equation 1. Valid: 22.5 - 23 MPa.  ~v=0.0028 m3/kg
double if97_r3mn_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3MN_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3MN_P_R3_COEFFS[i].ni * pow( p_MPa, T3MN_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3o/3p boundary T(p). See equation 2 (logarithmic form). Valid: 22.5 - 23 MPa.  ~v=0.0034 m3/kg
double if97_r3op_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3OP_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3OP_P_R3_COEFFS[i].ni * pow( log(p_MPa), T3OP_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3q/3u boundary T(p). See equation 1. Valid: Psat(643.15 K) - 22.5 MPa
double if97_r3qu_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3QU_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3QU_P_R3_COEFFS[i].ni * pow( p_MPa, T3QU_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3r/3x boundary T(p). See equation 1. Valid: Psat(643.15 K) - 22.5 MPa
double if97_r3rx_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3RX_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3RX_P_R3_COEFFS[i].ni * pow( p_MPa, T3RX_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

/* ****  REGION BOUNDARIES CLOSE TO CRITICAL REGION  ******* */

// Region 3u/3v boundary T(p). See equation 1.
double if97_r3uv_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3UV_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += T3UV_P_R3_COEFFS[i].ni * pow( p_MPa, T3UV_P_R3_COEFFS[i].Ji) ;
	}
	return dblPhiSum;
}

// Region 3w/3x boundary T(p). See equation 2 (logarithmic form).
double if97_r3wx_p_t (double p_MPa){
	int i;
	double dblPhiSum = 0.0;

	for (i=1; i <= (int)(sizeof(T3WX_P_R3_COEFFS)/sizeof(typIF97Coeffs_Jn) -1 ) ; i++) {
		dblPhiSum += (double)T3WX_P_R3_COEFFS[i].ni * pow( log(p_MPa), T3WX_P_R3_COEFFS[i].Ji );
	}
	return dblPhiSum;
}

// Determine the Region 3 subregion for a (p, T) point.
// Returns the subregion letter 'a' ... 'z' ('u'-'z' are the near-critical
// auxiliary subregions), or 0 when (p, T) is not in Region 3 at all.
// This function assumes the caller already believes the point is in Region 3;
// the leading range checks act as an error detector. See table 2 / figures 3-5.
char if97_r3_pt_subregion(double p_MPa, double t_K){
	const double P3_CD = 1.900881189173929e01;  // p(v = v3cd), bottom of table 2

	// First, make sure we are in R3
	if ((p_MPa > IF97_R3_UPRESS) || (p_MPa < IF97_B23_LPRESS)) return 0 ; // not R3
	else if ((t_K < IF97_R3_LTEMP) || (IF97_B23T(p_MPa) < t_K)) return 0 ; // not R3

	// now check through table 2 by pressure range
	if (p_MPa > 40.0) {  // already checked below 100 MPa (R3_UPRESS)
		if (t_K > if97_r3ab_p_t(p_MPa)) return 'b'; //3b
		else return 'a';  //3a
	}

	else if (p_MPa > 25.0) {  // already checked below or equal to 40 MPa
		if (if97_r3ab_p_t(p_MPa) < t_K ){  // right of T3ab line dividing 3d & 3e fig 3
			if (t_K <= if97_r3ef_p_t(p_MPa)) return 'e'; //3e
			else return 'f';  // 3f
		}
		// now on or left of T3ab line dividing 3d & 3e fig 3
		else if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
		else return 'd';  // 3d
	}

	else if (p_MPa > 23.0) {  // already checked below or equal to 25 MPa
		if (if97_r3ef_p_t(p_MPa) < t_K ){  // right of ef line fig 4
			if (t_K > if97_r3jk_p_t(p_MPa)) return 'k'; //3k
			else if (t_K > if97_r3ij_p_t(p_MPa)) return 'j'; //3j
			else return 'i'; //3i
		}
		// now on or left of ef line
		else if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
		else if (t_K <= if97_r3gh_p_t(p_MPa)){
			if (p_MPa > 23.5) return 'g';  // 3g  23.5 MPa < p <= 25 MPa
			else return 'l';  // 3l :  23 MPa < p <= 23.5 MPa
		}
		else return 'h';  // 3h
	}

	else if (p_MPa > 22.5) {  // already checked below or equal to 23.0 MPa
		if (if97_r3ef_p_t(p_MPa) < t_K ){  // right of ef line fig 4
			if (t_K > if97_r3jk_p_t(p_MPa)) return 'k'; //3k
			else if (t_K > if97_r3ij_p_t(p_MPa)) return 'j'; //3j
			else if (t_K > if97_r3op_p_t(p_MPa)) return 'p'; //3p
			else return 'o'; //3o
		}
		// now on or left of ef line
		else if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
		else if (t_K <= if97_r3gh_p_t(p_MPa)) return 'l'; // 3l
		else if (t_K <= if97_r3mn_p_t(p_MPa)) return 'm'; //3m
		else return 'n';  // 3n
	}

	// above Psat(643.15 K) = 21.0434 MPa to below or equal 22.5 MPa (already checked). See figure 5 & figure 3
	else if (p_MPa > if97_r4_ps(643.15)) {
		if (if97_r3rx_p_t(p_MPa) < t_K ){  // right of rx line fig 4
			if (t_K > if97_r3jk_p_t(p_MPa)) return 'k'; //3k
			else return 'r'; //3r
		}
		else if ( t_K <= if97_r3qu_p_t(p_MPa)){  // on or left of qu line fig 4
			if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
			else return 'q'; //3q
		}

		// *** now dealing with near supercritical region  Fig 5, Table 10 ***
		// First supercritical near critical (already checked below or equal to 22.5 MPa above)
		else if (22.11 < p_MPa){
			if (if97_r3ef_p_t(p_MPa) < t_K){  // already checked <= 3rx
				if (if97_r3wx_p_t(p_MPa) < t_K) return 'x'; //3x (Auxiliary)
				else return 'w'; //3w (Auxiliary)
			}
			else if (t_K <= if97_r3uv_p_t(p_MPa)) return 'u'; // 3u (Auxiliary; already checked above 3qu)
			else return 'v'; //3v (Auxiliary)
		}

		else if (IF97_PC < p_MPa){  // already checked less than or equal to 22.11 MPa
			if (if97_r3ef_p_t(p_MPa) < t_K){  // already checked <= 3rx
				if (if97_r3wx_p_t(p_MPa) < t_K) return 'x'; //3x (Auxiliary)
				else return 'z'; //3z (Auxiliary)
			}
			else if (t_K <= if97_r3uv_p_t(p_MPa)) return 'u'; // 3u (Auxiliary; already checked above 3qu)
			else return 'y'; //3y (Auxiliary)
		}

		// now subcritical near critical
		else if (t_K <= if97_r4_ts(p_MPa)){  // water phase near critical
			if (p_MPa <= 2.193161551e1) return 'u';  // 3u (Auxiliary) // see note e of table 10
			// already checked above Psat(643.15K)
			else if (if97_r3uv_p_t(p_MPa) < t_K) return 'y';  // 3y (Auxiliary)
			// already dealt with 3q above
			else return 'u';  // 3u (Auxiliary)
		}

		// already checked that it is either steam or saturated, near critical
		if (p_MPa <= 2.190096265e1 ) return 'x';  // 3x (Auxiliary) // see note f of table 10
		// already checked above Psat(643.15K)
		else if (if97_r3wx_p_t(p_MPa) < t_K) return 'x'; // 3x (Auxiliary)
		else return 'z'; //3z (Auxiliary)
	}

	// back to normal regions
	// see figure 3. Now all below Psat(643.15 K) = 21.0434 MPa
	else if (p_MPa > 20.5) {  // above 20.5 to below or equal Psat(643.15 K)
		if (if97_r4_ts(p_MPa) <= t_K ){  // on or right of saturation line fig 3
			if (t_K > if97_r3jk_p_t(p_MPa)) return 'k'; //3k
			else return 'r'; //3r
		}
		// left of saturation line fig 3
		else if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
		else return 'q'; //3q
	}

	else if (p_MPa > P3_CD) {  // above P3cd to below or equal 20.5 MPa
		if (if97_r4_ts(p_MPa) <= t_K ) return 't';  //3t : on or right of saturation line fig 3
		// left of saturation line fig 3
		else if (t_K <= if97_r3cd_p_t(p_MPa)) return 'c'; //3c
		else return 's'; //3s
	}

	else if (p_MPa > if97_r4_ps(IF97_R3_LTEMP)) {  // above Psat(623.15 K) to below or equal P3cd
		if (if97_r4_ts(p_MPa) <= t_K ) return 't';  //3t : on or right of saturation line fig 3
		// left of saturation line fig 3
		else return 'c'; //3c
	}

	else return 0 ;  // this should never execute
}

// checks if the region is handled by lower accuracy auxiliary equations.
// For best accuracy use the aux equation result as a starting iteration using the main equation
// Returns true when (p, T) lies in the strip between the 3qu and 3rx boundary
// lines (Psat(643.15 K) < p <= 22.5 MPa) that is covered only by the
// auxiliary near-critical equations.
bool isNearCritical (double p_MPa, double t_K){
	if (((p_MPa <= 22.5) && (if97_r4_ps(643.15) < p_MPa)) && ((if97_r3qu_p_t(p_MPa) < t_K) && (t_K <= if97_r3rx_p_t(p_MPa))))
		return true;
	else return false;
}

// The following sets of coefficients from Appendix A1 .

// specific volume (m3/kg) in region 3a for a given temperature (K) and pressure (MPa)
// equation 4
double if97_r3a_v_pt (double p_MPa, double t_K){
	// reducing parameters: v* (m3/kg), p* (MPa), T* (K), N, a, b, c, d, e
	const typR3RedCoefs V3A_PT_RC = { 0.0024, 100.0, 760.0, 30, 0.085, 0.817, 1.0, 1.0, 1.0 };

	const typIF97Coeffs_IJn V3A_PT_COEFFS[] = {
		{0,	0,	0.0}  // 0  i starts at 1, so 0th i is not used
		,{-12,	5,	0.110879558823853e-2}
		,{-12,	10,	0.572616740810616e03}
		,{-12,	12,	-0.767051948380852e05}
		,{-10,	5,	-0.253321069529674e-1}
		,{-10,	10,	0.628008049345689e04}	//5
		,{-10,	12,	0.234105654131876e06}
		,{-8,	5,	0.216867826045856e00}
		,{-8,	8,	-0.156237904341963e03}
		,{-8,	10,	-0.269893956176613e05}
		,{-6,	1,	-0.180407100085505e-3}	//10
		,{-5,	1,	0.116732227668261e-2}
		,{-5,	5,	0.266987040856040e02}
		,{-5,	10,	0.282776617243286e05}
		,{-4,	8,	-0.242431520029523e04}
		,{-3,	0,	0.435217323022733e-3}	//15
		,{-3,	1,	-0.122494831387441e-1}
		,{-3,	3,	0.179357604019989e01}
		,{-3,	6,	0.442729521058314e02}
		,{-2,	0,	-0.593223489018342e-2}
		,{-2,	2,	0.453186261685774e00}	//20
		,{-2,	3,	0.135825703129140e01}
		,{-1,	0,	0.408748415856745e-1}
		,{-1,	1,	0.474686397863312e00}
		,{-1,	2,	0.118646814997915e01}
		,{0,	0,	0.546987265727549e00}	//25
		,{0,	1,	0.195266770452643e00}
		,{1,	0,	-0.502268790869663e-1}
		,{1,	2,	-0.369645308193377e00}
		,{2,	0,	0.633828037528420e-2}
		,{2,	2,	0.797441793901017e-1}	//30
	};

	int i = 1;
	double omegasum = 0;
	// reduced pressure and temperature (equation 4)
	double pi = p_MPa / V3A_PT_RC.pStar;
	double theta = t_K / V3A_PT_RC.tStar;

	#pragma omp parallel for reduction (+:omegasum)
	for (i = 1; i <= V3A_PT_RC.N; i++) {
		omegasum += V3A_PT_COEFFS[i].ni * pow(pow((pi - V3A_PT_RC.a), V3A_PT_RC.c) , V3A_PT_COEFFS[i].Ii)
					* pow(pow((theta - V3A_PT_RC.b), V3A_PT_RC.d ), V3A_PT_COEFFS[i].Ji);
	}

	return V3A_PT_RC.vStar * pow(omegasum , V3A_PT_RC.e );
}

// specific volume (m3/kg) in region 3b for a given temperature (K) and pressure (MPa)
// equation 4
double if97_r3b_v_pt (double p_MPa, double t_K){
	// reducing parameters: v* (m3/kg), p* (MPa), T* (K), N, a, b, c, d, e
	const typR3RedCoefs V3B_PT_RC = { 0.0041, 100.0, 860.0, 32, 0.280, 0.779, 1, 1, 1 };

	const typIF97Coeffs_IJn V3B_PT_COEFFS[] = {
		{0,	0,	0.0}  // 0  i starts at 1, so 0th i is not used
		,{-12,	10,	-0.827670470003621e-1}
		,{-12,	12,	0.416887126010565e02}
		,{-10,	8,	0.483651982197059e-1}
		,{-10,	14,	-0.291032084950276e05}
		,{-8,	8,	-0.111422582236948e03}	//5
		,{-6,	5,	-0.202300083904014e-1}
		,{-6,	6,	0.294002509338515e03}
		,{-6,	8,	0.140244997609658e03}
		,{-5,	5,	-0.344384158811459e03}
		,{-5,	8,	0.361182452612149e03}	//10
		,{-5,	10,	-0.140699677420738e04}
		,{-4,	2,	-0.202023902676481e-2}
		,{-4,	4,	0.171346792457471e03}
		,{-4,	5,	-0.425597804058632e01}
		,{-3,	0,	0.691346085000334e-5}	//15
		,{-3,	1,	0.151140509678925e-2}
		,{-3,	2,	-0.416375290166236e-1}
		,{-3,	3,	-0.413754957011042e02}
		,{-3,	5,	-0.506673295721637e02}
		,{-2,	0,	-0.572212965569023e-3}	//20
		,{-2,	2,	0.608817368401785e01}
		,{-2,	5,	0.239600660256161e02}
		,{-1,	0,	0.122261479925384e-1}
		,{-1,	2,	0.216356057692938e01}
		,{0,	0,	0.398198903368642e00}	//25
		,{0,	1,	-0.116892827834085e00}
		,{1,	0,	-0.102845919373532e00}
		,{1,	2,	-0.492676637589284e00}
		,{2,	0,	0.655540456406790e-1}
		,{3,	2,	-0.240462535078530e00}	//30
		,{4,	0,	-0.269798180310075e-1}
		,{4,	1,	0.128369435967012e00}	//32
	};

	int i = 1;
	double omegasum = 0;
	// reduced pressure and temperature (equation 4)
	double pi = p_MPa / V3B_PT_RC.pStar;
	double theta = t_K / V3B_PT_RC.tStar;

	#pragma omp parallel for reduction (+:omegasum)
	for (i = 1; i <= V3B_PT_RC.N; i++) {
		omegasum += V3B_PT_COEFFS[i].ni * pow(pow((pi - V3B_PT_RC.a), V3B_PT_RC.c) , V3B_PT_COEFFS[i].Ii)
					* pow(pow((theta - V3B_PT_RC.b), V3B_PT_RC.d ), V3B_PT_COEFFS[i].Ji);
	}

	return V3B_PT_RC.vStar * pow(omegasum , V3B_PT_RC.e );
}

// specific volume (m3/kg) in region 3c for a given temperature (K) and pressure (MPa)
// equation 4
double
if97_r3c_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3C_PT_RC = { 0.0022, 40.0, 690.0, 35, 0.259, 0.903, 1.0, 1.0, 1.0 }; typIF97Coeffs_IJn V3C_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{-12, 6, 0.311967788763030e01} ,{-12, 8, 0.276713458847564e05} ,{-12, 10, 0.322583103403269e08} ,{-10, 6, -0.342416065095363e03} ,{-10, 8, -0.899732529907377e06} //5 ,{-10, 10, -0.793892049821251e08} ,{-8, 5, 0.953193003217388e02} ,{-8, 6, 0.229784742345072e04} ,{-8, 7, 0.175336675322499e06} ,{-6, 8, 0.791214365222792e07}//10 ,{-5, 1, 0.319933345844209e-4} ,{-5, 4, -0.659508863555767e02} ,{-5, 7, -0.833426563212851e06} ,{-4, 2, 0.645734680583292e-1} ,{-4, 8, -0.382031020570813e07} //15 ,{-3, 0, 0.406398848470079e-4} ,{-3, 3, 0.310327498492008e02} ,{-2, 0, -0.892996718483724e-3} ,{-2, 4, 0.234604891591616e03} ,{-2, 5, 0.377515668966951e04} //20 ,{-1, 0, 0.158646812591361e-1} ,{-1, 1, 0.707906336241843e00} ,{-1, 2, 0.126016225146570e02} ,{0, 0, 0.736143655772152e00} ,{0, 1, 0.676544268999101e00} //25 ,{0, 2, -0.178100588189137e02} ,{1, 0, -0.156531975531713e00} ,{1, 2, 0.117707430048158e02} ,{2, 0, 0.840143653860447e-1} ,{2, 1, -0.186442467471949e00} //30 ,{2, 3, -0.440170203949645e02} ,{2, 7, 0.123290423502494e07} ,{3, 0, -0.240650039730845e-1} ,{3, 7, -0.107077716660869e07} ,{8, 1, 0.438319858566475e-1} //35 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3C_PT_RC.pStar; double theta = t_K / V3C_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3C_PT_RC.N; i++) { omegasum += V3C_PT_COEFFS[i].ni * pow(pow((pi - V3C_PT_RC.a), V3C_PT_RC.c) , V3C_PT_COEFFS[i].Ii) * pow(pow((theta - V3C_PT_RC.b), V3C_PT_RC.d ), V3C_PT_COEFFS[i].Ji); } return V3C_PT_RC.vStar * pow(omegasum , V3C_PT_RC.e ); } // specific volume (m3/kg) in region 3d for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3d_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3D_PT_RC = { 0.0029, 40.0, 690.0, 38, 0.559, 0.939, 1.0, 
1.0, 4.0 }; const typIF97Coeffs_IJn V3D_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{-12, 4, -0.452484847171645e-9} ,{-12, 6, 0.315210389538801e-4} ,{-12, 7, -0.214991352047545e-2} ,{-12, 10, 0.508058874808345e03} ,{-12, 12, -0.127123036845932e08} //5 ,{-12, 16, 0.115371133120497e13} ,{-10, 0, -0.197805728776273e-15} ,{-10, 2, 0.241554806033972e-10} ,{-10, 4, -0.156481703640525e-5} ,{-10, 6, 0.277211346836625e-2} //10 ,{-10, 8, -0.203578994462286e2} ,{-10, 10, 0.144369489909053e7} ,{-10, 14, -0.411254217946539e11} ,{-8, 3, 0.623449786243773e-5} ,{-8, 7, -0.221774281146038e2} //15 ,{-8, 8, -0.689315087933158e5} ,{-8, 10, -0.195419525060713e8} ,{-6, 6, 0.316373510564015e4} ,{-6, 8, 0.224040754426988e7} ,{-5, 1, -0.436701347922356e-5} //20 ,{-5, 2, -0.404213852833996e-3} ,{-5, 5, -0.348153203414663e3} ,{-5, 7, -0.385294213555289e6} ,{-4, 0, 0.135203700099403e-6} ,{-4, 1, 0.134648383271089e-3} //25 ,{-4, 7, 0.125031835351736e6} ,{-3, 2, 0.968123678455841e-1} ,{-3, 4, 0.225660517512438e3} ,{-2, 0, -0.190102435341872e-3} ,{-2, 1, -0.299628410819229e-1} //30 ,{-1, 0, 0.500833915372121e-2} ,{-1, 1, 0.387842482998411} ,{-1, 5, -0.138535367777182e4} ,{0, 0, 0.870745245971773} ,{0, 2, 0.171946252068742e1} //35 ,{1, 0, -0.326650121426383e-1} ,{1, 6, 0.498044171727877e4} ,{3, 0, 0.551478022765087e-2} //38*/ }; int i = 1; double omegasum = 0; double pi = p_MPa / V3D_PT_RC.pStar; double theta = t_K / V3D_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3D_PT_RC.N; i++) { omegasum += V3D_PT_COEFFS[i].ni * pow(pow((pi - V3D_PT_RC.a), V3D_PT_RC.c) , V3D_PT_COEFFS[i].Ii) * pow(pow((theta - V3D_PT_RC.b), V3D_PT_RC.d ), V3D_PT_COEFFS[i].Ji); } return V3D_PT_RC.vStar * pow(omegasum , V3D_PT_RC.e ); } // specific volume (m3/kg) in region 3e for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3e_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3E_PT_RC = { 0.0032, 40.0, 710.0, 29, 0.587, 0.918, 1.0, 1.0, 1.0 
}; const typIF97Coeffs_IJn V3E_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{-12, 14, 0.715815808404721e09} ,{-12, 16, -0.114328360753449e12} ,{-10, 3, 0.376531002015720e-11} ,{-10, 6, -0.903983668691157e-4} ,{-10, 10, 0.665695908836252e06} // 5 ,{-10, 14, 0.535364174960127e10} ,{-10, 16, 0.794977402335603e11} ,{-8, 7, 0.922230563421437e02} ,{-8, 8, -0.142586073991215e06} ,{-8, 10, -0.111796381424162e07} //10 ,{-6, 6, 0.896121629640760e04} ,{-5, 6, -0.669989239070491e04} ,{-4, 2, 0.451242538486834e-2} ,{-4, 4, -0.339731325977713e02} ,{-3, 2, -0.120523111552278e01} //15 ,{-3, 6, 0.475992667717124e05} ,{-3, 7, -0.266627750390341e06} ,{-2, 0, -0.153314954386524e-3} ,{-2, 1, 0.305638404828265e00} ,{-2, 3, 0.123654999499486e03} //20 ,{-2, 4, -0.104390794213011e04} ,{-1, 0, -0.157496516174308e-1} ,{0, 0, 0.685331118940253e00} ,{0, 1, 0.178373462873903e01} ,{1, 0, -0.544674124878910e00} //25 ,{1, 4, 0.204529931318843e04} ,{1, 6, -0.228342359328752e05} ,{2, 0, 0.413197481515899e00} ,{2, 2, -0.3419311835910405e02} //29 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3E_PT_RC.pStar; double theta = t_K / V3E_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3E_PT_RC.N; i++) { omegasum += V3E_PT_COEFFS[i].ni * pow(pow((pi - V3E_PT_RC.a), V3E_PT_RC.c) , V3E_PT_COEFFS[i].Ii) * pow(pow((theta - V3E_PT_RC.b), V3E_PT_RC.d ), V3E_PT_COEFFS[i].Ji); } return V3E_PT_RC.vStar * pow(omegasum , V3E_PT_RC.e ); } // specific volume (m3/kg) in region 3f for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3f_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3F_PT_RC = { 0.0064, 40.0, 730.0, 42, 0.587, 0.891, 0.5, 1.0, 4.0 }; const typIF97Coeffs_IJn V3F_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{0, -3, -0.251756547792325e-7} ,{0, -2, 0.601307193668763e-5} ,{0, -1, -0.100615977450049e-2} ,{0, 0, 0.999969140252192e00} ,{0, 1, 0.214107759236486e01} //5 ,{0, 2, -0.165175571959086e02} 
,{1, -1, -0.141987303638727e-2} ,{1, 1, 0.269251915156554e01} ,{1, 2, 0.349741815858722e02} ,{1, 3, -0.300208695771783e02} //10 ,{2, 0, -0.131546288252539e01} ,{2, 1, -0.839091277286169e01} ,{3, -5, 0.181545608337015e-9} ,{3, -2, -0.591099206478909e-3} ,{3, 0, 0.152115067087106e01} //15 ,{4, -3, 0.252956470663225e-4} ,{5, -8, 0.100726265203786e-14} ,{5, 1, -0.149774533860650e01} ,{6, -6, -0.793940970562969e-9} ,{7, -4, -0.150290891264717e-3} //20 ,{7, 1, 0.151205531275133e01} ,{10, -6, 0.470942606221652e-5} ,{12, -10, 0.195049710391712e-12} ,{12, -8, -0.911627886266077e-8} ,{12, -4, 0.604374640201265e-3} //25 ,{14, -12, -0.225132933900136e-15} ,{14, -10, 0.610916973582981e-11} ,{14, -8, -0.303063908043404e-6} ,{14, -6, -0.137796070798409e-4} ,{14, -4, -0.919296736666106e-3} //30 ,{16, -10, 0.639288223132545e-9} ,{16, -8, 0.753259479898699e-6} ,{18, -12, -0.400321478682929e-12} ,{18, -10, 0.756140294351614e-8} ,{20, -12, -0.912082054034891e-11} //35 ,{20, -10, -0.237612381140539e-7} ,{20, -6, 0.269586010591874e-4} ,{22, -12, -0.732828135157839e-10} ,{24, -12, 0.241995578306660e-9} ,{24, -4, -0.405735532730322e-3} //40 ,{28, -12, 0.189424143498011e-9} ,{32, -12, -0.486632965074563e-9} //42 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3F_PT_RC.pStar; double theta = t_K / V3F_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3F_PT_RC.N; i++) { omegasum += V3F_PT_COEFFS[i].ni * pow(pow((pi - V3F_PT_RC.a), V3F_PT_RC.c) , V3F_PT_COEFFS[i].Ii) * pow(pow((theta - V3F_PT_RC.b), V3F_PT_RC.d ), V3F_PT_COEFFS[i].Ji); } return V3F_PT_RC.vStar * pow(omegasum , V3F_PT_RC.e ); } // specific volume (m3/kg) in region 3g for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3g_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3G_PT_RC = { 0.0027, 25.0, 660.0, 38, 0.872, 0.971, 1.0, 1.0, 4.0 }; const typIF97Coeffs_IJn V3G_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{-12, 7, 0.412209020652996e-4} 
,{-12, 12, -0.114987238280587e07} ,{-12, 14, 0.948180885032080e10} ,{-12, 18, -0.195788865718971e18} ,{-12, 22, 0.496250704871300e25} //5 ,{-12, 24, -0.105549884548496e29} ,{-10, 14, -0.758642165988278e12} ,{-10, 20, -0.922172769596101e23} ,{-10, 24, 0.725379072059348e30} ,{-8, 7, -0.617718249205859e02} //10 ,{-8, 8, 0.107555033344858e05} ,{-8, 10, -0.379545802336487e08} ,{-8, 12, 0.228646846221831e12} ,{-6, 8, -0.499741093010619e07} ,{-6, 22, -0.280214310054101e31} //15 ,{-5, 7, 0.104915406769586e07} ,{-5, 20, 0.613754229168619e28} ,{-4, 22, 0.802056715528387e32} ,{-3, 7, -0.298617819828065e08} ,{-2, 3, -0.910782540134681e02} //20 ,{-2, 5, 0.135033227281565e06} ,{-2, 14, -0.712949383408211e19} ,{-2, 24, -0.104578785289542e37} ,{-1, 2, 0.304331584444093e02} ,{-1, 8, 0.593250797959445e10} //25 ,{-1, 18, -0.364174062110798e28} ,{0, 0, 0.921791403532461e00} ,{0, 1, -0.337693609657471e00} ,{0, 2, -0.724644143758508e02} ,{1, 0, -0.110480239272601e00} //30 ,{1, 1, 0.536516031875059e01} ,{1, 3, -0.291441872156205e04} ,{3, 24, 0.616338176535305e40} ,{5, 22, -0.120889175861180e39} ,{6, 12, 0.818396024524612e23} //35 ,{8, 3, 0.940781944835829e09} ,{10, 0, -0.367279669545448e05} ,{10, 6, -0.837513931798655e16} //38 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3G_PT_RC.pStar; double theta = t_K / V3G_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3G_PT_RC.N; i++) { omegasum += V3G_PT_COEFFS[i].ni * pow(pow((pi - V3G_PT_RC.a), V3G_PT_RC.c) , V3G_PT_COEFFS[i].Ii) * pow(pow((theta - V3G_PT_RC.b), V3G_PT_RC.d ), V3G_PT_COEFFS[i].Ji); } return V3G_PT_RC.vStar * pow(omegasum , V3G_PT_RC.e ); } // specific volume (m3/kg) in region 3h for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3h_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3H_PT_RC = { 0.0032, 25.0, 660.0, 29, 0.898, 0.983, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // 
http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3H_PT_COEFFS[] = { {0, 0, 0.0}, //0 i starts at 1, so 0th i is not used {-12, 8, .561379678887577e-1}, {-12, 12, .774135421587083e10}, {-10, 4, .111482975877938e-8}, {-10, 6, -.143987128208183e-2}, {-10, 8, .193696558764920e4}, {-10, 10, -.605971823585005e9}, {-10, 14, .171951568124337e14}, {-10, 16, -.185461154985145e17}, {-8, 0, .387851168078010e-16}, {-8, 1, -.395464327846105e-13}, {-8, 6, -.170875935679023e3}, {-8, 7, -.212010620701220e4}, {-8, 8, .177683337348191e8}, {-6, 4, .110177443629575e2}, {-6, 6, -.234396091693313e6}, {-6, 8, -.656174421999594e7}, {-5, 2, .156362212977396e-4}, {-5, 3, -.212946257021400e1}, {-5, 4, .135249306374858e2}, {-4, 2, .177189164145813}, {-4, 4, .139499167345464e4}, {-3, 1, -.703670932036388e-2}, {-3, 2, -.152011044389648}, {-2, 0, .981916922991113e-4}, {-1, 0, .147199658618076e-2}, {-1, 2, .202618487025578e2}, {0, 0, .899345518944240}, {1, 0, -.211346402240858}, {1, 2, .249971752957491e2} }; int i = 1; double omegasum = 0; double pi = p_MPa / V3H_PT_RC.pStar; double theta = t_K / V3H_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3H_PT_RC.N; i++) { omegasum += V3H_PT_COEFFS[i].ni * pow(pow((pi - V3H_PT_RC.a), V3H_PT_RC.c) , V3H_PT_COEFFS[i].Ii) * pow(pow((theta - V3H_PT_RC.b), V3H_PT_RC.d ), V3H_PT_COEFFS[i].Ji); } return V3H_PT_RC.vStar * pow(omegasum , V3H_PT_RC.e ); } // specific volume (m3/kg) in region 3i for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3i_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3I_PT_RC = { 0.0041, 25.0, 660.0, 42, 0.910, 0.984, 0.5, 1.0, 4.0 }; const typIF97Coeffs_IJn V3I_PT_COEFFS[] = { {0, 0, 0.0}, //0 i starts at 1, so 0th i is not used {0, 0, .106905684359136e1}, {0, 1, -.148620857922333e1}, {0, 10, .259862256980408e15}, {1, -4, -.446352055678749e-11}, {1, -2, -.566620757170032e-6}, {1, -1, 
-.235302885736849e-2}, {1, 0, -.269226321968839}, {2, 0, .922024992944392e1}, {3, -5, .357633505503772e-11}, {3, 0, -.173942565562222e2}, {4, -3, .700681785556229e-5}, {4, -2, -.267050351075768e-3}, {4, -1, -.231779669675624e1}, {5, -6, -.753533046979752e-12}, {5, -1, .481337131452891e1}, {5, 12, -.223286270422356e22}, {7, -4, -.118746004987383e-4}, {7, -3, .646412934136496e-2}, {8, -6, -.410588536330937e-9}, {8, 10, .422739537057241e20}, {10, -8, .313698180473812e-12}, {12, -12, .164395334345040e-23}, {12, -6, -.339823323754373e-5}, {12, -4, -.135268639905021e-1}, {14, -10, -.723252514211625e-14}, {14, -8, .184386437538366e-8}, {14, -4, -.463959533752385e-1}, {14, 5, -.992263100376750e14}, {18, -12, .688169154439335e-16}, {18, -10, -.222620998452197e-10}, {18, -8, -.540843018624083e-7}, {18, -6, .345570606200257e-2}, {18, 2, .422275800304086e11}, {20, -12, -.126974478770487e-14}, {20, -10, .927237985153679e-9}, {22, -12, .612670812016489e-13}, {24, -12, -.722693924063497e-11}, {24, -8, -.383669502636822e-3}, {32, -10, .374684572410204e-3}, {32, -5, -.931976897511086e5}, {36, -10, -.247690616026922e-1}, {36, -8, .658110546759474e2} //42 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3I_PT_RC.pStar; double theta = t_K / V3I_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3I_PT_RC.N; i++) { omegasum += V3I_PT_COEFFS[i].ni * pow(pow((pi - V3I_PT_RC.a), V3I_PT_RC.c) , V3I_PT_COEFFS[i].Ii) * pow(pow((theta - V3I_PT_RC.b), V3I_PT_RC.d ), V3I_PT_COEFFS[i].Ji); } return V3I_PT_RC.vStar * pow(omegasum , V3I_PT_RC.e ); } // specific volume (m3/kg) in region 3j for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3j_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3J_PT_RC = { 0.0054, 25.0, 670.0, 29, 0.875, 0.964, 0.5, 1.0, 4.0 }; const typIF97Coeffs_IJn V3J_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{0, -1, -0.111371317395540e-3} ,{0, 0, 0.100342892423685e01} ,{0, 1, 
0.530615581928979e01} ,{1, -2, 0.179058760078792e-5} ,{1, -1, -0.728541958464774e-3} //5 ,{1, 1, -0.187576133371704e02} ,{2, -1, 0.199060874071849e-2} ,{2, 1, 0.243574755377290e02} ,{3, -2, -0.177040785499444e-3} ,{4, -2, -0.259680385227130e-2} //10 ,{4, 2, -0.198704578406823e03} ,{5, -3, 0.738627790224287e-4} ,{5, -2, -0.236264692844138e-2} ,{5, 0, -0.161023121314333e01} ,{6, 3, 0.622322971786473e04} //15 ,{10, -6, -0.960754116701669e-8} ,{12, -8, -0.510572269720488e-10} ,{12, -3, 0.767373781404211e-2} ,{14, -10, 0.663855469485254e-14} ,{14, -8, -0.717590735526745e-9} //20 ,{14, -5, 0.146564542926508e-4} ,{16, -10, 0.309029474277013e-11} ,{18, -12, -0.464216300971708e-15} ,{20, -12, -0.390499637961161e-13} ,{20, -10, -0.236716126781431e-9} //25 ,{24, -12, 0.454652854268717e-11} ,{24, -6, -0.422271787482497e-2} ,{28, -12, 0.283911742354706e-10} ,{28, -5, 0.270929002720228e01} //29 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3J_PT_RC.pStar; double theta = t_K / V3J_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3J_PT_RC.N; i++) { omegasum += V3J_PT_COEFFS[i].ni * pow(pow((pi - V3J_PT_RC.a), V3J_PT_RC.c) , V3J_PT_COEFFS[i].Ii) * pow(pow((theta - V3J_PT_RC.b), V3J_PT_RC.d ), V3J_PT_COEFFS[i].Ji); } return V3J_PT_RC.vStar * pow(omegasum , V3J_PT_RC.e ); } // specific volume (m3/kg) in region 3k for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3k_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3K_PT_RC = { 0.0077, 25.0, 680.0, 34, 0.802, 0.935, 1.0, 1.0, 1.0 }; const typIF97Coeffs_IJn V3K_PT_COEFFS[] = { {0, 0, 0.0}, //0 i starts at 1, so 0th i is not used {-2, 10, -.401215699576099e9}, {-2, 12, .484501478318406e11}, {-1, -5, .394721471363678e-14}, {-1, 6, .372629967374147e5}, {0, -12, -.369794374168666e-29}, //5 {0, -6, -.380436407012452e-14}, {0, -2, .475361629970233e-6}, {0, -1, -.879148916140706e-3}, {0, 0, .844317863844331}, {0, 1, .122433162656600e2}, //10 {0, 2, -.104529634830279e3}, {0, 
3, .589702771277429e3}, {0, 14, -.291026851164444e14}, {1, -3, .170343072841850e-5}, {1, -2, -.277617606975748e-3}, //15 {1, 0, -.344709605486686e1}, {1, 1, .221333862447095e2}, {1, 2, -.194646110037079e3}, {2, -8, .808354639772825e-15}, {2, -6, -.180845209145470e-10}, //20 {2, -3, -.696664158132412e-5}, {2, -2, -.181057560300994e-2}, {2, 0, .255830298579027e1}, {2, 4, .328913873658481e4}, {5, -12, -.173270241249904e-18}, //25 {5, -6, -.661876792558034e-6}, {5, -3, -.395688923421250e-2}, {6, -12, .604203299819132e-17}, {6, -10, -.400879935920517e-13}, {6, -8, .160751107464958e-8}, //30 {6, -5, .383719409025556e-4}, {8, -12, -.649565446702457e-14}, {10, -12, -.149095328506000e-11}, {12, -10, .541449377329581e-8} //34 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3K_PT_RC.pStar; double theta = t_K / V3K_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3K_PT_RC.N; i++) { omegasum += V3K_PT_COEFFS[i].ni * pow(pow((pi - V3K_PT_RC.a), V3K_PT_RC.c) , V3K_PT_COEFFS[i].Ii) * pow(pow((theta - V3K_PT_RC.b), V3K_PT_RC.d ), V3K_PT_COEFFS[i].Ji); } return V3K_PT_RC.vStar * pow(omegasum , V3K_PT_RC.e ); } // specific volume (m3/kg) in region 3l for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3l_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3L_PT_RC = { 0.0026, 24.0, 650.0, 43, 0.908, 0.989, 1.0, 1.0, 4.0 }; const typIF97Coeffs_IJn V3L_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{-12, 14, 0.260702058647537e10} ,{-12, 16, -0.188277213604704e15} ,{-12, 18, 0.554923870289667e19} ,{-12, 20, -0.758966946387758e23} ,{-12, 22, 0.413865186848908e27} //5 ,{-10, 14, -0.815038000738060e12} ,{-10, 24, -0.381458260489955e33} ,{-8, 6, -0.123239564600519e-1} ,{-8, 10, 0.226095631437174e08} ,{-8, 12, -0.495017809506720e12} //10 ,{-8, 14, 0.529482996422863e16} ,{-8, 18, -0.444359478746295e23} ,{-8, 24, 0.521635864527315e35} ,{-8, 36, -0.487095672740742e55} ,{-6, 8, -0.714430209937547e06} //15 ,{-5, 
4, 0.127868634615495e00} ,{-5, 5, -0.100752127917598e02} ,{-4, 7, 0.777451437960990e07} ,{-4, 16, -0.108105480796471e25} ,{-3, 1, -0.357578581169659e-5} //20 ,{-3, 3, -0.212857169423484e01} ,{-3, 18, 0.270706111085238e30} ,{-3, 20, -0.695953622348829e33} ,{-2, 2, 0.110609027472280e00} ,{-2, 3, 0.721559163361354e02} //25 ,{-2, 10, -0.306367307532219e15} ,{-1, 0, 0.265839618885530e-4} ,{-1, 1, 0.253392392889754e-1} ,{-1, 3, -0.214443041836579e03} ,{0, 0, 0.937846601489667e00} //30 ,{0, 1, 0.223184043101700e01} ,{0, 2, 0.338401222509191e02} ,{0, 12, 0.494237237179718e21} ,{1, 0, -0.198068404154028e00} ,{1, 16, -0.141415349881140e31} //35 ,{2, 1, -0.993862421613651e02} ,{4, 0, 0.125070534142731e03} ,{5, 0, -0.996473529004439e03} ,{5, 1, 0.473137909872765e05} ,{6, 14, 0.116662121219322e33} //40 ,{10, 4, -0.315874976271533e16} ,{10, 12, -0.445703369196945e33} ,{14, 10, 0.642794932373694e33} //43 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3L_PT_RC.pStar; double theta = t_K / V3L_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3L_PT_RC.N; i++) { omegasum += V3L_PT_COEFFS[i].ni * pow(pow((pi - V3L_PT_RC.a), V3L_PT_RC.c) , V3L_PT_COEFFS[i].Ii) * pow(pow((theta - V3L_PT_RC.b), V3L_PT_RC.d ), V3L_PT_COEFFS[i].Ji); } return V3L_PT_RC.vStar * pow(omegasum , V3L_PT_RC.e ); } // specific volume (m3/kg) in region 3m for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3m_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3M_PT_RC = { 0.0028, 23.0, 650.0, 40, 1.0, 0.997, 1.0, 0.25, 1.0 }; const typIF97Coeffs_IJn V3M_PT_COEFFS[] = { {0, 0, 0.0} //0 i starts at 1, so 0th i is not used ,{0, 0, 0.811384363481847e00} ,{3, 0, -0.568199310990094e04} ,{8, 0, -0.178657198172556e11} ,{20, 2, 0.795537657613427e32} ,{1, 5, -0.814568209346872e05} //5 ,{3, 5, -0.659774567602874e08} ,{4, 5, -0.152861148659302e11} ,{5, 5, -0.560165667510446e12} ,{1, 6, 0.458384828593949e06} ,{6, 6, -0.385754000383848e14} //10 ,{2, 7, 
0.453735800004273e08} ,{4, 8, 0.939454935735563e12} ,{14, 8, 0.266572856432938e28} ,{2, 10, -0.547578313899097e10} ,{5, 10, 0.200725701112386e15} //15 ,{3, 12, 0.185007245563239e13} ,{0, 14, 0.185135446828337e09} ,{1, 14, -0.170451090076385e12} ,{1, 18, 0.157890366037614e15} ,{1, 20, -0.202530509748774e16} //20 ,{28, 20, 0.368193926183570e60} ,{2, 22, 0.170215539458936e18} ,{16, 22, 0.639234909918741e42} ,{0, 24, -0.821698160721956e15} ,{5, 24, -0.795260241872306e24} //25 ,{0, 28, 0.233415869478510e18} ,{3, 28, -0.600079934586803e23} ,{4, 28, 0.594584382273384e25} ,{12, 28, 0.189461279349492e40} ,{16, 28, -0.810093428842645e46} //30 ,{1, 32, 0.188813911076809e22} ,{8, 32, 0.111052244098768e36} ,{14, 32, 0.291133958602503e46} ,{0, 36, -0.329421923951460e22} ,{2, 36, -0.137570282536969e26} //35 ,{3, 36, 0.181508996303902e28} ,{4, 36, -0.346865122768353e30} ,{8, 36, -0.211961148774260e38} ,{14, 36, -0.128617899887675e49} ,{24, 36, 0.479817895699239e65} //40 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3M_PT_RC.pStar; double theta = t_K / V3M_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3M_PT_RC.N; i++) { omegasum += V3M_PT_COEFFS[i].ni * pow(pow((pi - V3M_PT_RC.a), V3M_PT_RC.c) , V3M_PT_COEFFS[i].Ii) * pow(pow((theta - V3M_PT_RC.b), V3M_PT_RC.d ), V3M_PT_COEFFS[i].Ji); } return V3M_PT_RC.vStar * pow(omegasum , V3M_PT_RC.e ); } // specific volume (m3/kg) in region 3n for a given temperature (K) and pressure (MPa) // equation 5 double if97_r3n_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3N_PT_RC = { 0.0031, 23.0, 650.0, 39, 0.976, 0.997, 0.0, 0.0, 0.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3N_PT_COEFFS[] = { {0, 0, 0.0}, //0 i starts at 1, so 0th i is not used {0, -12, .280967799943151e-38}, {3, -12, .614869006573609e-30}, {4, -12, .582238667048942e-27}, {6, -12, .390628369238462e-22}, {7, -12, 
.821445758255119e-20}, {10, -12, .402137961842776e-14}, {12, -12, .651718171878301e-12}, {14, -12, -.211773355803058e-7}, {18, -12, .264953354380072e-2}, {0, -10, -.135031446451331e-31}, {3, -10, -.607246643970893e-23}, {5, -10, -.402352115234494e-18}, {6, -10, -.744938506925544e-16}, {8, -10, .189917206526237e-12}, {12, -10, .364975183508473e-5}, {0, -8, .177274872361946e-25}, {3, -8, -.334952758812999e-18}, {7, -8, -.421537726098389e-8}, {12, -8, -.391048167929649e-1}, {2, -6, .541276911564176e-13}, {3, -6, .705412100773699e-11}, {4, -6, .258585887897486e-8}, {2, -5, -.493111362030162e-10}, {4, -5, -.158649699894543e-5}, {7, -5, -.525037427886100}, {4, -4, .220019901729615e-2}, {3, -3, -.643064132636925e-2}, {5, -3, .629154149015048e2}, {6, -3, .135147318617061e3}, {0, -2, .240560808321713e-6}, {0, -1, -.890763306701305e-3}, {3, -1, -.440209599407714e4}, {1, 0, -.302807107747776e3}, {0, 1, .159158748314599e4}, {1, 1, .232534272709876e6}, {0, 2, -.792681207132600e6}, {1, 4, -.869871364662769e11}, {0, 5, .354542769185671e12}, {1, 6, .400849240129329e15} //39 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3N_PT_RC.pStar; double theta = t_K / V3N_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3N_PT_RC.N; i++) { omegasum += V3N_PT_COEFFS[i].ni * pow((pi - V3N_PT_RC.a), V3N_PT_COEFFS[i].Ii) * pow((theta - V3N_PT_RC.b), V3N_PT_COEFFS[i].Ji ); } return V3N_PT_RC.vStar * exp(omegasum); } // specific volume (m3/kg) in region 3o for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3o_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3O_PT_RC = { 0.0034, 23.0, 650.0, 24, 0.974, 0.996, 0.5, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3O_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {0, -12, .128746023979718e-34}, {0, -4, -.735234770382342e-11}, {0, -1, 
.289078692149150e-2}, {2, -1, .244482731907223}, {3, -10, .141733492030985e-23}, {4, -12, -.354533853059476e-28}, {4, -8, -.594539202901431e-17}, {4, -5, -.585188401782779e-8}, {4, -4, .201377325411803e-5}, {4, -1, .138647388209306e1}, {5, -4, -.173959365084772e-4}, {5, -3, .137680878349369e-2}, {6, -8, .814897605805513e-14}, {7, -12, .425596631351839e-25}, {8, -10, -.387449113787755e-17}, {8, -8, .139814747930240e-12}, {8, -4, -.171849638951521e-2}, {10, -12, .641890529513296e-21}, {10, -8, .118960578072018e-10}, {14, -12, -.155282762571611e-17}, {14, -8, .233907907347507e-7}, {20, -12, -.174093247766213e-12}, {20, -10, .377682649089149e-8}, {24, -12, -.516720236575302e-10} //24 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3O_PT_RC.pStar; double theta = t_K / V3O_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3O_PT_RC.N; i++) { omegasum += V3O_PT_COEFFS[i].ni * pow(pow((pi - V3O_PT_RC.a), V3O_PT_RC.c) , V3O_PT_COEFFS[i].Ii) * pow(pow((theta - V3O_PT_RC.b), V3O_PT_RC.d ), V3O_PT_COEFFS[i].Ji); } return V3O_PT_RC.vStar * pow(omegasum , V3O_PT_RC.e ); } // specific volume (m3/kg) in region 3p for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3p_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3P_PT_RC = { 0.0041, 23.0, 650.0, 27, 0.972, 0.997, 0.5, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3P_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {0, -1, -.982825342010366e-4}, {0, 0, .105145700850612e1}, {0, 1, .116033094095084e3}, {0, 2, .324664750281543e4}, {1, 1, -.123592348610137e4}, {2, -1, -.561403450013495e-1}, {3, -3, .856677401640869e-7}, {3, 0, .236313425393924e3}, {4, -2, .972503292350109e-2}, {6, -2, -.103001994531927e1}, {7, -5, -.149653706199162e-8}, {7, -4, -.215743778861592e-4}, {8, -2, -.834452198291445e1}, {10, -3, .586602660564988}, {12, 
-12, .343480022104968e-25}, {12, -6, .816256095947021e-5}, {12, -5, .294985697916798e-2}, {14, -10, .711730466276584e-16}, {14, -8, .400954763806941e-9}, {14, -3, .107766027032853e2}, {16, -8, -.409449599138182e-6}, {18, -8, -.729121307758902e-5}, {20, -10, .677107970938909e-8}, {22, -10, .602745973022975e-7}, {24, -12, -.382323011855257e-10}, {24, -8, .179946628317437e-2}, {36, -12, -.345042834640005e-3} //27 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3P_PT_RC.pStar; double theta = t_K / V3P_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3P_PT_RC.N; i++) { omegasum += V3P_PT_COEFFS[i].ni * pow(pow((pi - V3P_PT_RC.a), V3P_PT_RC.c) , V3P_PT_COEFFS[i].Ii) * pow(pow((theta - V3P_PT_RC.b), V3P_PT_RC.d ), V3P_PT_COEFFS[i].Ji); } return V3P_PT_RC.vStar * pow(omegasum , V3P_PT_RC.e ); } // specific volume (m3/kg) in region 3q for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3q_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3Q_PT_RC = { 0.0022, 23.0, 650.0, 24, 0.848, 0.983, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3Q_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-12, 10, -.820433843259950e5}, {-12, 12, .473271518461586e11}, {-10, 6, -.805950021005413e-1}, {-10, 7, .328600025435980e2}, {-10, 8, -.356617029982490e4}, {-10, 10, -.172985781433335e10}, {-8, 8, .351769232729192e8}, {-6, 6, -.775489259985144e6}, {-5, 2, .710346691966018e-4}, {-5, 5, .993499883820274e5}, {-4, 3, -.642094171904570}, {-4, 4, -.612842816820083e4}, {-3, 3, .232808472983776e3}, {-2, 0, -.142808220416837e-4}, {-2, 1, -.643596060678456e-2}, {-2, 2, -.428577227475614e1}, {-2, 4, .225689939161918e4}, {-1, 0, .100355651721510e-2}, {-1, 1, .333491455143516}, {-1, 2, .109697576888873e1}, {0, 0, .961917379376452}, {1, 0, -.838165632204598e-1}, {1, 1, .247795908411492e1}, {1, 3, 
-.319114969006533e4} //24 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3Q_PT_RC.pStar; double theta = t_K / V3Q_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3Q_PT_RC.N; i++) { omegasum += V3Q_PT_COEFFS[i].ni * pow(pow((pi - V3Q_PT_RC.a), V3Q_PT_RC.c) , V3Q_PT_COEFFS[i].Ii) * pow(pow((theta - V3Q_PT_RC.b), V3Q_PT_RC.d ), V3Q_PT_COEFFS[i].Ji); } return V3Q_PT_RC.vStar * pow(omegasum , V3Q_PT_RC.e ); } // specific volume (m3/kg) in region 3r for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3r_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3R_PT_RC = { 0.0054, 23.0, 650.0, 27, 0.874, 0.982, 1.0, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3R_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-8, 6, .144165955660863e-2}, {-8, 14, -.701438599628258e13}, {-3, -3, -.830946716459219e-16}, {-3, 3, .261975135368109}, {-3, 4, .393097214706245e3}, {-3, 5, -.104334030654021e5}, {-3, 8, .490112654154211e9}, {0, -1, -.147104222772069e-3}, {0, 0, .103602748043408e1}, {0, 1, .305308890065089e1}, {0, 5, -.399745276971264e7}, {3, -6, .569233719593750e-11}, {3, -2, -.464923504407778e-1}, {8, -12, -.535400396512906e-17}, {8, -10, .399988795693162e-12}, {8, -8, -.536479560201811e-6}, {8, -5, .159536722411202e-1}, {10, -12, .270303248860217e-14}, {10, -10, .244247453858506e-7}, {10, -8, -.983430636716454e-5}, {10, -6, .663513144224454e-1}, {10, -5, -.993456957845006e1}, {10, -4, .546491323528491e3}, {10, -3, -.143365406393758e5}, {10, -2, .150764974125511e6}, {12, -12, -.337209709340105e-9}, {14, -12, .377501980025469e-8} //27 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3R_PT_RC.pStar; double theta = t_K / V3R_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3R_PT_RC.N; i++) { omegasum += V3R_PT_COEFFS[i].ni * pow(pow((pi - 
V3R_PT_RC.a), V3R_PT_RC.c) , V3R_PT_COEFFS[i].Ii) * pow(pow((theta - V3R_PT_RC.b), V3R_PT_RC.d ), V3R_PT_COEFFS[i].Ji); } return V3R_PT_RC.vStar * pow(omegasum , V3R_PT_RC.e ); } // specific volume (m3/kg) in region 3s for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3s_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3S_PT_RC = { 0.0022, 21.0, 640.0, 29, 0.886, 0.990, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3S_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-12, 20, -.532466612140254e23}, {-12, 24, .100415480000824e32}, {-10, 22, -.191540001821367e30}, {-8, 14, .105618377808847e17}, {-6, 36, .202281884477061e59}, {-5, 8, .884585472596134e8}, {-5, 16, .166540181638363e23}, {-4, 6, -.313563197669111e6}, {-4, 32, -.185662327545324e54}, {-3, 3, -.624942093918942e-1}, {-3, 8, -.504160724132590e10}, {-2, 4, .187514491833092e5}, {-1, 1, .121399979993217e-2}, {-1, 2, .188317043049455e1}, {-1, 3, -.167073503962060e4}, {0, 0, .965961650599775}, {0, 1, .294885696802488e1}, {0, 4, -.653915627346115e5}, {0, 28, .604012200163444e50}, {1, 0, -.198339358557937}, {1, 32, -.175984090163501e58}, {3, 0, .356314881403987e1}, {3, 1, -.575991255144384e3}, {3, 2, .456213415338071e5}, {4, 3, -.109174044987829e8}, {4, 18, .437796099975134e34}, {4, 24, -.616552611135792e46}, {5, 4, .193568768917797e10}, {14, 24, .950898170425042e54} //29 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3S_PT_RC.pStar; double theta = t_K / V3S_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3S_PT_RC.N; i++) { omegasum += V3S_PT_COEFFS[i].ni * pow(pow((pi - V3S_PT_RC.a), V3S_PT_RC.c) , V3S_PT_COEFFS[i].Ii) * pow(pow((theta - V3S_PT_RC.b), V3S_PT_RC.d ), V3S_PT_COEFFS[i].Ji); } return V3S_PT_RC.vStar * pow(omegasum , V3S_PT_RC.e ); } // specific volume (m3/kg) in region 3t for a given 
temperature (K) and pressure (MPa) // equation 4 double if97_r3t_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3T_PT_RC = { 0.0088, 20.0, 650.0, 33, 0.803, 1.02, 1.0, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3T_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {0, 0, .155287249586268e1}, {0, 1, .664235115009031e1}, {0, 4, -.289366236727210e4}, {0, 12, -.385923202309848e13}, {1, 0, -.291002915783761e1}, {1, 10, -.829088246858083e12}, {2, 0, .176814899675218e1}, {2, 6, -.534686695713469e9}, {2, 14, .160464608687834e18}, {3, 3, .196435366560186e6}, {3, 8, .156637427541729e13}, {4, 0, -.178154560260006e1}, {4, 10, -.229746237623692e16}, {7, 3, .385659001648006e8}, {7, 4, .110554446790543e10}, {7, 7, -.677073830687349e14}, {7, 20, -.327910592086523e31}, {7, 36, -.341552040860644e51}, {10, 10, -.527251339709047e21}, {10, 12, .245375640937055e24}, {10, 14, -.168776617209269e27}, {10, 16, .358958955867578e29}, {10, 22, -.656475280339411e36}, {18, 18, .355286045512301e39}, {20, 32, .569021454413270e58}, {22, 22, -.700584546433113e48}, {22, 36, -.705772623326374e65}, {24, 24, .166861176200148e53}, {28, 28, -.300475129680486e61}, {32, 22, -.668481295196808e51}, {32, 32, .428432338620678e69}, {32, 36, -.444227367758304e72}, {36, 36, -.281396013562745e77} //33 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3T_PT_RC.pStar; double theta = t_K / V3T_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3T_PT_RC.N; i++) { omegasum += V3T_PT_COEFFS[i].ni * pow(pow((pi - V3T_PT_RC.a), V3T_PT_RC.c) , V3T_PT_COEFFS[i].Ii) * pow(pow((theta - V3T_PT_RC.b), V3T_PT_RC.d ), V3T_PT_COEFFS[i].Ji); } return V3T_PT_RC.vStar * pow(omegasum , V3T_PT_RC.e ); } /* **** AUXILLIARY CONSTANTS FOR CLOSE TO CRITICAL REGIION ******* */ // specific volume (m3/kg) in near critical region 3u for a given temperature (K) and 
pressure (MPa) // equation 4 double if97_r3u_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3U_PT_RC = { 0.0026, 23.0, 650.0, 38, 0.902, 0.988, 1.0, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3U_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-12, 14, .122088349258355e18}, {-10, 10, .104216468608488e10}, {-10, 12, -.882666931564652e16}, {-10, 14, .259929510849499e20}, {-8, 10, .222612779142211e15}, {-8, 12, -.878473585050085e18}, {-8, 14, -.314432577551552e22}, {-6, 8, -.216934916996285e13}, {-6, 12, .159079648196849e21}, {-5, 4, -.339567617303423e3}, {-5, 8, .884387651337836e13}, {-5, 12, -.843405926846418e21}, {-3, 2, .114178193518022e2}, {-1, -1, -.122708229235641e-3}, {-1, 1, -.106201671767107e3}, {-1, 12, .903443213959313e25}, {-1, 14, -.693996270370852e28}, {0, -3, .648916718965575e-8}, {0, 1, .718957567127851e4}, {1, -2, .105581745346187e-2}, {2, 5, -.651903203602581e15}, {2, 10, -.160116813274676e25}, {3, -5, -.510254294237837e-8}, {5, -4, -.152355388953402}, {5, 2, .677143292290144e12}, {5, 3, .276378438378930e15}, {6, -5, .116862983141686e-1}, {6, 2, -.301426947980171e14}, {8, -8, .169719813884840e-7}, {8, 8, .104674840020929e27}, {10, -4, -.108016904560140e5}, {12, -12, -.990623601934295e-12}, {12, -4, .536116483602738e7}, {12, 4, .226145963747881e22}, {14, -12, -.488731565776210e-9}, {14, -10, .151001548880670e-4}, {14, -6, -.227700464643920e5}, {14, 6, -.781754507698846e28} //38 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3U_PT_RC.pStar; double theta = t_K / V3U_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3U_PT_RC.N; i++) { omegasum += V3U_PT_COEFFS[i].ni * pow(pow((pi - V3U_PT_RC.a), V3U_PT_RC.c) , V3U_PT_COEFFS[i].Ii) * pow(pow((theta - V3U_PT_RC.b), V3U_PT_RC.d ), V3U_PT_COEFFS[i].Ji); } return V3U_PT_RC.vStar * pow(omegasum , V3U_PT_RC.e ); } // 
specific volume (m3/kg) in near critical region 3v for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3v_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3V_PT_RC = { 0.0031, 23.0, 650.0, 39, 0.960, 0.995, 1.0, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3V_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-10, -8, -.415652812061591e-54}, {-8, -12, .177441742924043e-60}, {-6, -12, -.357078668203377e-54}, {-6, -3, .359252213604114e-25}, {-6, 5, -.259123736380269e2}, {-6, 6, .594619766193460e5}, {-6, 8, -.624184007103158e11}, {-6, 10, .313080299915944e17}, {-5, 1, .105006446192036e-8}, {-5, 2, -.192824336984852e-5}, {-5, 6, .654144373749937e6}, {-5, 8, .513117462865044e13}, {-5, 10, -.697595750347391e19}, {-5, 14, -.103977184454767e29}, {-4, -12, .119563135540666e-47}, {-4, -10, -.436677034051655e-41}, {-4, -6, .926990036530639e-29}, {-4, 10, .587793105620748e21}, {-3, -3, .280375725094731e-17}, {-3, 10, -.192359972440634e23}, {-3, 12, .742705723302738e27}, {-2, 2, -.517429682450605e2}, {-2, 4, .820612048645469e7}, {-1, -2, -.188214882341448e-8}, {-1, 0, .184587261114837e-1}, {0, -2, -.135830407782663e-5}, {0, 6, -.723681885626348e17}, {0, 10, -.223449194054124e27}, {1, -12, -.111526741826431e-34}, {1, -10, .276032601145151e-28}, {3, 3, .134856491567853e15}, {4, -6, .652440293345860e-9}, {4, 3, .510655119774360e17}, {4, 10, -.468138358908732e32}, {5, 2, -.760667491183279e16}, {8, -12, -.417247986986821e-18}, {10, -2, .312545677756104e14}, {12, -3, -.100375333864186e15}, {14, 1, .247761392329058e27} //39 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3V_PT_RC.pStar; double theta = t_K / V3V_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3V_PT_RC.N; i++) { omegasum += V3V_PT_COEFFS[i].ni * pow(pow((pi - V3V_PT_RC.a), V3V_PT_RC.c) , V3V_PT_COEFFS[i].Ii) * 
pow(pow((theta - V3V_PT_RC.b), V3V_PT_RC.d ), V3V_PT_COEFFS[i].Ji); } return V3V_PT_RC.vStar * pow(omegasum , V3V_PT_RC.e ); } // specific volume (m3/kg) in near critical region 3w for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3w_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3W_PT_RC = { 0.0039, 23.0, 650.0, 35, 0.959, 0.995, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3W_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-12, 8, -.586219133817016e-7}, {-12, 14, -.894460355005526e11}, {-10, -1, .531168037519774e-30}, {-10, 8, .109892402329239}, {-8, 6, -.575368389425212e-1}, {-8, 8, .228276853990249e5}, {-8, 14, -.158548609655002e19}, {-6, -4, .329865748576503e-27}, {-6, -3, -.634987981190669e-24}, {-6, 2, .615762068640611e-8}, {-6, 8, -.961109240985747e8}, {-5, -10, -.406274286652625e-44}, {-4, -1, -.471103725498077e-12}, {-4, 3, .725937724828145}, {-3, -10, .187768525763682e-38}, {-3, 3, -.103308436323771e4}, {-2, 1, -.662552816342168e-1}, {-2, 2, .579514041765710e3}, {-1, -8, .237416732616644e-26}, {-1, -4, .271700235739893e-14}, {-1, 1, -.907886213483600e2}, {0, -12, -.171242509570207e-36}, {0, 1, .156792067854621e3}, {1, -1, .923261357901470}, {2, -1, -.597865988422577e1}, {2, 2, .321988767636389e7}, {3, -12, -.399441390042203e-29}, {3, -5, .493429086046981e-7}, {5, -10, .812036983370565e-19}, {5, -8, -.207610284654137e-11}, {5, -6, -.340821291419719e-6}, {8, -12, .542000573372233e-17}, {8, -10, -.856711586510214e-12}, {10, -12, .266170454405981e-13}, {10, -8, .858133791857099e-5} //35 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3W_PT_RC.pStar; double theta = t_K / V3W_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3W_PT_RC.N; i++) { omegasum += V3W_PT_COEFFS[i].ni * pow(pow((pi - V3W_PT_RC.a), V3W_PT_RC.c) , V3W_PT_COEFFS[i].Ii) * 
pow(pow((theta - V3W_PT_RC.b), V3W_PT_RC.d ), V3W_PT_COEFFS[i].Ji); } return V3W_PT_RC.vStar * pow(omegasum , V3W_PT_RC.e ); } // specific volume (m3/kg) in near critical region 3x for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3x_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3X_PT_RC = { 0.0049, 23.0, 650.0, 36, 0.910, 0.988, 1.0, 1.0, 1.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3X_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {-8, 14, .377373741298151e19}, {-6, 10, -.507100883722913e13}, {-5, 10, -.103363225598860e16}, {-4, 1, .184790814320773e-5}, {-4, 2, -.924729378390945e-3}, //5 {-4, 14, -.425999562292738e24}, {-3, -2, -.462307771873973e-12}, {-3, 12, .107319065855767e22}, {-1, 5, .648662492280682e11}, {0, 0, .244200600688281e1}, //10 {0, 4, -.851535733484258e10}, {0, 10, .169894481433592e22}, {1, -10, .215780222509020e-26}, {1, -1, -.320850551367334}, {2, 6, -.382642448458610e17}, //15 {3, -12, -.275386077674421e-28}, {3, 0, -.563199253391666e6}, {3, 8, -.326068646279314e21}, {4, 3, .397949001553184e14}, {5, -6, .100824008584757e-6}, //20 {5, -2, .162234569738433e5}, {5, 1, -.432355225319745e11}, {6, 1, -.592874245598610e12}, {8, -6, .133061647281106e1}, {8, -3, .157338197797544e7}, //25 {8, 1, .258189614270853e14}, {8, 8, .262413209706358e25}, {10, -8, -.920011937431142e-1}, {12, -10, .220213765905426e-2}, {12, -8, -.110433759109547e2}, //30 {12, -5, .847004870612087e7}, {12, -4, -.592910695762536e9}, {14, -12, -.183027173269660e-4}, {14, -10, .181339603516302}, {14, -8, -.119228759669889e4}, //35 {14, -6, .430867658061468e7} //36 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3X_PT_RC.pStar; double theta = t_K / V3X_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3X_PT_RC.N; i++) { omegasum += V3X_PT_COEFFS[i].ni * pow(pow((pi - V3X_PT_RC.a), 
V3X_PT_RC.c) , V3X_PT_COEFFS[i].Ii) * pow(pow((theta - V3X_PT_RC.b), V3X_PT_RC.d ), V3X_PT_COEFFS[i].Ji); } return V3X_PT_RC.vStar * pow(omegasum , V3X_PT_RC.e ); } // specific volume (m3/kg) in near critical region 3y for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3y_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3Y_PT_RC = { 0.0031, 22.0, 650.0, 20, 0.996, 0.994, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801 const typIF97Coeffs_IJn V3Y_PT_COEFFS[] = { {0, 0, 0.}, //0 i starts at 1, so 0th i is not used {0, -3, -.525597995024633e-9}, {0, 1, .583441305228407e4}, {0, 5, -.134778968457925e17}, {0, 8, .118973500934212e26}, {1, 8, -.159096490904708e27}, //5 {2, -4, -.315839902302021e-6}, {2, -1, .496212197158239e3}, {2, 4, .327777227273171e19}, {2, 5, -.527114657850696e22}, {3, -8, .210017506281863e-16}, //10 {3, 4, .705106224399834e21}, {3, 8, -.266713136106469e31}, {4, -6, -.145370512554562e-7}, {4, 6, .149333917053130e28}, {5, -2, -.149795620287641e8}, //15 {5, 1, -.381881906271100e16}, {8, -8, .724660165585797e-4}, {8, -2, -.937808169550193e14}, {10, -5, .514411468376383e10}, {12, -8, -.828198594040141e5} //20 }; int i = 1; double omegasum = 0; double pi = p_MPa / V3Y_PT_RC.pStar; double theta = t_K / V3Y_PT_RC.tStar; #pragma omp parallel for reduction (+:omegasum) for (i = 1; i <= V3Y_PT_RC.N; i++) { omegasum += V3Y_PT_COEFFS[i].ni * pow(pow((pi - V3Y_PT_RC.a), V3Y_PT_RC.c) , V3Y_PT_COEFFS[i].Ii) * pow(pow((theta - V3Y_PT_RC.b), V3Y_PT_RC.d ), V3Y_PT_COEFFS[i].Ji); } return V3Y_PT_RC.vStar * pow(omegasum , V3Y_PT_RC.e ); } // specific volume (m3/kg) in near critical region 3z for a given temperature (K) and pressure (MPa) // equation 4 double if97_r3z_v_pt (double p_MPa, double t_K){ const typR3RedCoefs V3Z_PT_RC = { 0.0038, 22.0, 650.0, 23, 0.993, 0.994, 1.0, 1.0, 4.0 }; //From Hummeling if97 Java // 
http://sourceforge.net/p/if97/git/ci/master/tree/src/main/java/com/hummeling/if97/Region3.java#l1801
	// Coefficients Ii, Ji, ni of backward equation 4 for subregion 3z.
	const typIF97Coeffs_IJn V3Z_PT_COEFFS[] = {
		{0, 0, 0.}, //0  i starts at 1, so 0th i is not used
		{-8, 3, .244007892290650e-10},
		{-6, 6, -.463057430331242e7},
		{-5, 6, .728803274777712e10},
		{-5, 8, .327776302858856e16},
		{-4, 5, -.110598170118409e10},
		{-4, 6, -.323899915729957e13},
		{-4, 8, .923814007023245e16},
		{-3, -2, .842250080413712e-12},
		{-3, 5, .663221436245506e12},
		{-3, 6, -.167170186672139e15},
		{-2, 2, .253749358701391e4},
		{-1, -6, -.819731559610523e-20},
		{0, 3, .328380587890663e12},
		{1, 1, -.625004791171543e8},
		{2, 6, .803197957462023e21},
		{3, -6, -.204397011338353e-10},
		{3, -2, -.378391047055938e4},
		{6, -6, .972876545938620e-2},
		{6, -5, .154355721681459e2},
		{6, -4, -.373962862928643e4},
		{6, -1, -.682859011374572e11},
		{8, -8, -.248488015614543e-3},
		{8, -4, .394536049497068e7} //23  (comment previously said //22, but there are 23 coefficients, matching V3Z_PT_RC.N)
	};

	int i = 1;
	double omegasum = 0;
	// Reduced pressure and temperature for the dimensionless equation.
	double pi = p_MPa / V3Z_PT_RC.pStar;
	double theta = t_K / V3Z_PT_RC.tStar;

	// Each term of the sum is independent, so the summation parallelizes
	// with an OpenMP reduction.
	#pragma omp parallel for reduction (+:omegasum)
	for (i = 1; i <= V3Z_PT_RC.N; i++) {
		omegasum += V3Z_PT_COEFFS[i].ni
			* pow(pow((pi - V3Z_PT_RC.a), V3Z_PT_RC.c) , V3Z_PT_COEFFS[i].Ii)
			* pow(pow((theta - V3Z_PT_RC.b), V3Z_PT_RC.d ), V3Z_PT_COEFFS[i].Ji);
	}

	return V3Z_PT_RC.vStar * pow(omegasum , V3Z_PT_RC.e );
}


// Region 3 specific volume (m3/kg) using backwards equations. These meet the
// criteria of 0.001% on enthalpy and entropy specific volume and 0.01% on Cp
// outside the near critical region, which can be checked with "isNearCritical"
// in the near critical region the backwards equations should provide a good starting
// guess for iteration
double if97_R3bw_v_pt (double p_MPa, double t_K){

	// Dispatch table: one backward v(p,T) function per subregion 'a'..'z',
	// indexed by (subregion letter - 'a').
	double (*ptrR3bw_v[26])(double, double) = {
		if97_r3a_v_pt, if97_r3b_v_pt, if97_r3c_v_pt, if97_r3d_v_pt, if97_r3e_v_pt,
		if97_r3f_v_pt, if97_r3g_v_pt, if97_r3h_v_pt, if97_r3i_v_pt, if97_r3j_v_pt,
		if97_r3k_v_pt, if97_r3l_v_pt, if97_r3m_v_pt, if97_r3n_v_pt, if97_r3o_v_pt,
		if97_r3p_v_pt, if97_r3q_v_pt, if97_r3r_v_pt, if97_r3s_v_pt, if97_r3t_v_pt,
		if97_r3u_v_pt, if97_r3v_v_pt, if97_r3w_v_pt, if97_r3x_v_pt, if97_r3y_v_pt,
		if97_r3z_v_pt };

	int i = 0;
	// Subregion letter for this (p, T) point, determined elsewhere in this file.
	char R3_bw_region = if97_r3_pt_subregion(p_MPa, t_K);

	i =(int)R3_bw_region - (int)'a';

	// Index outside 'a'..'z' means the point is not in region 3: signal with 0.
	if ((i < 0) || (i >25)) return 0.00; //ERROR
	else return ptrR3bw_v[i] (p_MPa, t_K);
}
task_parallel.c
#include <stdio.h>
#include <errno.h>   // for errno
#include <math.h>
#include <limits.h>  // for INT_MAX
#include <stdlib.h>  // for strtol
#include <time.h>
#include <omp.h>

// Number of OpenMP threads used by multiply_matrix (set from argv[1] in main).
int numThreads = 1;

// Frees a matrix previously created by allocMatrix (one free per row, then
// the row-pointer array itself).
void freeMatrix(double** matrix, long lins){
    for (long i = 0; i < lins; ++i) {
        free(matrix[i]);
    }
    free(matrix);
}

// Allocates a lins x cols matrix as an array of row pointers.
// FIX: the original dereferenced the malloc results without checking them;
// on allocation failure we now report the error and abort instead of
// invoking undefined behaviour.
double** allocMatrix(long lins, long cols){
    double** matrix = malloc(lins*sizeof(double*));
    if (matrix == NULL) {
        fprintf(stderr, "allocMatrix: out of memory\n");
        exit(-1);
    }
    for (long i = 0; i < lins; ++i) {
        matrix[i] = malloc(cols*sizeof(double));
        if (matrix[i] == NULL) {
            fprintf(stderr, "allocMatrix: out of memory\n");
            exit(-1);
        }
    }
    return matrix;
}

// Prints the matrix row by row, one trailing blank line at the end.
void printMatrix(double** matrix, long lins, long cols){
    for (long i = 0; i < lins; ++i) {
        for (long j = 0; j < cols; ++j)
            printf("%lf ", matrix[i][j]);
        printf("\n");
    }
    printf("\n");
}

// Fills the matrix with pseudo-random values in [0, RAND_MAX/INT_MAX],
// deterministically from the given seed.
void fillMatrix(double** matrix, long lins, long cols, long seed){
    srand(seed);
    for (long i = 0; i < lins; ++i) {
        for (long j = 0; j < cols; ++j) {
            matrix[i][j] = (double) rand() / INT_MAX;
        }
    }
}

// Computes one row of the product: result = linA * B, where linA has `size`
// elements and B is size x colsB.
void multiply_row(double* linA, double** B, double* result, long colsB, long size){
    for (long j = 0; j < colsB; ++j) {
        result[j] = 0;
        for (long k = 0; k < size; ++k){
            result[j] += linA[k] * B[k][j];
        }
    }
}

// Multiplies A (linsA x size) by B (size x colsB) into result, spawning one
// OpenMP task per output row. A single thread creates the tasks; the team
// executes them.
void multiply_matrix(double** A, double** B, double** result, long linsA, long colsB, long size){
    #pragma omp parallel num_threads(numThreads) default(none) \
        shared(linsA, numThreads, A, B, result, colsB, size)
    {
        #pragma omp single
        {
            for (long i = 0; i < linsA; ++i) {
                #pragma omp task
                multiply_row(A[i], B, result[i], colsB, size);
            }
        }
    }
}

// Parses a decimal long from str; exits with an error message if the string
// is not a well-formed number (uses strtol + errno, not atoi).
long convert_str_long(char *str){
    char *p;
    errno = 0;
    long conv = strtol(str, &p, 10);
    if (errno != 0 || *p != '\0') {
        printf("%s não é um número!\n", str);
        exit(-1);
    }
    return (long)conv;
}

// Usage: prog <threads> <show_matrix> <seedA> <seedB> <linsA> <colsA> <linsB> <colsB>
// Times the task-parallel multiplication and optionally prints all matrices.
int main(int argc, char **argv){
    if (argc != 9) {
        printf("É necessário informar os seguintes argumentos:\nO número de threads a serem usadas\nSe as matrizes devem ser exibidas\nSeed para gerar a matriz A\nSeed para gerar a matriz B\nNúmero de linhas de A\nNúmero de colunas de A\nNúmero de linhas de B\nNúmero de colunas de B\n");
        return -1;
    }

    numThreads = convert_str_long(argv[1]);
    int show_matrix = convert_str_long(argv[2]);
    long seedA = convert_str_long(argv[3]);
    long seedB = convert_str_long(argv[4]);
    long linsA = convert_str_long(argv[5]);
    long colsA = convert_str_long(argv[6]);
    long linsB = convert_str_long(argv[7]);
    long colsB = convert_str_long(argv[8]);

    // Inner dimensions must agree for A*B to exist.
    if(colsA != linsB){
        printf("Número de colunas de A é diferente do número de linhas de B, multiplicação não é possivel.\n");
        return -1;
    }

    double t = omp_get_wtime();

    double** A = allocMatrix(linsA, colsA);
    double** B = allocMatrix(linsB, colsB);
    double** R = allocMatrix(linsA, colsB);

    fillMatrix(A, linsA, colsA, seedA);
    fillMatrix(B, linsB, colsB, seedB);

    multiply_matrix(A, B, R, linsA, colsB, colsA);

    t = omp_get_wtime() - t;
    printf("%.10lf\n", t);

    if(show_matrix == 1){
        printMatrix(A, linsA, colsA);
        printMatrix(B, linsB, colsB);
        printMatrix(R, linsA, colsB);
    }

    freeMatrix(A, linsA);
    freeMatrix(B, linsB);
    freeMatrix(R, linsA);
    return 0;
} /* main */
program_schedule_static.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Demonstrates OpenMP static loop scheduling: prints each loop index
 * together with the id of the thread that executed it.
 *
 * Usage: program <thread_count> <n>
 */
int main(int argc, char* argv[]) {
    // FIX: the original read argv[1]/argv[2] without checking argc,
    // which is undefined behaviour when arguments are missing.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <thread_count> <n>\n", argv[0]);
        return 1;
    }

    int thread_count = strtol(argv[1], NULL, 10);
    int n = strtol(argv[2], NULL, 10);

    // schedule(static): iterations are divided into contiguous chunks
    // assigned round-robin to threads before the loop starts.
    #pragma omp parallel for num_threads(thread_count) schedule(static)
    for (int i = 0; i < n; i ++) {
        printf("i=%d, thread_id=%d\n", i, omp_get_thread_num());
    }

    return 0;
}
pentagon.h
// This is AutoMine/GraphZero implementation #if 0 #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for(vidType v0 = 0; v0 < g.V(); v0++) { for (auto v1 : g.N(v0)) { if (v1 >= v0) break; for (auto v2 : g.N(v0)) { if (v2 >= v1) break; for (auto v3 : g.N(v2)) { if (v3 >= v0) break; if (v3 == v1) continue; counter += bounded_intersect_except(g, v1, v3, v0, v2); } } } } #else #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for(vidType v0 = 0; v0 < g.V(); v0++) { auto y0 = g.N(v0); auto y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { auto v1 = y0f0.begin()[idx1]; auto y1 = g.N(v1); VertexSet n0f0y1; difference_set(n0f0y1,y1, y0); auto y0f0n1f1 = difference_set(y0f0, y1, v1); for(vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) { auto v2 = y0f0n1f1.begin()[idx2]; auto y2 = g.N(v2); VertexSet n0f0n1y2; difference_set(n0f0n1y2,difference_set(n0f0n1y2,y2, y0), y1); auto n0y1n2f0 = difference_set(n0f0y1, y2, v0); for(vidType idx3 = 0; idx3 < n0y1n2f0.size(); idx3++) { auto v3 = n0y1n2f0.begin()[idx3]; auto y3 = g.N(v3); counter += intersection_num(n0f0n1y2, y3, v0); } } } } #endif
DRB008-indirectaccess4-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Two pointers have a distance of 12 (xa2 - xa1 = 12).
They are used as base addresses for indirect array accesses using an index
set (another array).

The index set has two indices with distance of 12 :
indexSet[1]- indexSet[0] = 533 - 521 = 12
So xa1[idx] and xa2[idx] may cause loop carried dependence for N=0 and N=3.

We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within a same chunk to a same thread. So there is no runtime data races.

N is 180, two iteraions with N=0 and N= 1 have loop carried dependences.
For static even scheduling, we must have at least 180 threads
(180/180=1 iterations) so iteration 0 and 1 will be scheduled to two
different threads.

Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
#include <omp.h>

/* Index set touching base[521 .. 2013+12]; pairs of entries differ by 12,
 * matching the xa1/xa2 pointer offset (e.g. 521+12=533). */
int indexSet[180] = {(521), (533), (525), (527), (529), (531), (547),
(549), (551), (553), (555), (557), (573), (575), (577), (579), (581), (583),
(599), (601), (603), (605), (607), (609), (625), (627), (629), (631), (633),
(635), (651), (653), (655), (657), (659), (661), (859), (861), (863), (865),
(867), (869), (885), (887), (889), (891), (893), (895), (911), (913), (915),
(917), (919), (921), (937), (939), (941), (943), (945), (947), (963), (965),
(967), (969), (971), (973), (989), (991), (993), (995), (997), (999), (1197),
(1199), (1201), (1203), (1205), (1207), (1223), (1225), (1227), (1229), (1231),
(1233), (1249), (1251), (1253), (1255), (1257), (1259), (1275), (1277), (1279),
(1281), (1283), (1285), (1301), (1303), (1305), (1307), (1309), (1311), (1327),
(1329), (1331), (1333), (1335), (1337), (1535), (1537), (1539), (1541), (1543),
(1545), (1561), (1563), (1565), (1567), (1569), (1571), (1587), (1589), (1591),
(1593), (1595), (1597), (1613), (1615), (1617), (1619), (1621), (1623), (1639),
(1641), (1643), (1645), (1647), (1649), (1665), (1667), (1669), (1671), (1673),
(1675), (1873), (1875), (1877), (1879), (1881), (1883), (1899), (1901), (1903),
(1905), (1907), (1909), (1925), (1927), (1929), (1931), (1933), (1935), (1951),
(1953), (1955), (1957), (1959), (1961), (1977), (1979), (1981), (1983), (1985),
(1987), (2003), (2005), (2007), (2009), (2011), (2013) // 521+12=533
};

/* Benchmark driver. NOTE: the xa1/xa2 aliasing below is intentional — this
 * file is a DataRaceBench case; do not "fix" the dependence pattern. */
int main(int argc,char *argv[])
{
  double *base = (double *)(malloc(sizeof(double ) * (2013 + 12 + 1)));
  if (base == 0) {
    printf("Error in malloc(). Aborting ...\n");
    return 1;
  }
  /* xa1 and xa2 alias the same buffer, 12 elements apart. */
  double *xa1 = base;
  double *xa2 = xa1 + 12;
  int i;
  // initialize segments touched by indexSet
#pragma omp parallel for private (i)
  for (i = 521; i <= 2025; i += 1) {
    base[i] = 0.5 * i;
  }
  /* Indirect updates through the index set; xa1[idx] and xa2[idx] can hit
   * the same element for index pairs 12 apart (the documented race pair). */
  for (i = 0; i <= 179; i += 1) {
    int idx = indexSet[i];
    xa1[idx] += 1.0;
    xa2[idx] += 3.0;
  }
  printf("x1[999]=%f xa2[1285]=%f\n",xa1[999],xa2[1285]);
  free(base);
  return 0;
}
rowwise_pick.h
/*!
 * Copyright (c) 2020 by Contributors
 * \file array/cpu/rowwise_pick.h
 * \brief Template implementation for rowwise pick operators.
 */
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_

#include <dgl/array.h>
#include <functional>
#include <algorithm>
#include <string>
#include <vector>

namespace dgl {
namespace aten {
namespace impl {

// User-defined function for picking elements from one row.
//
// The column indices of the given row are stored in
//   [col + off, col + off + len)
//
// Similarly, the data indices are stored in
//   [data + off, data + off + len)
// Data index pointer could be NULL, which means data[i] == i
//
// *ATTENTION*: This function will be invoked concurrently. Please make sure
// it is thread-safe.
//
// \param rowid The row to pick from.
// \param off Starting offset of this row.
// \param len NNZ of the row.
// \param col Pointer of the column indices.
// \param data Pointer of the data indices.
// \param out_idx Picked indices in [off, off + len).
template <typename IdxType>
using PickFn = std::function<void(
    IdxType rowid, IdxType off, IdxType len,
    const IdxType* col, const IdxType* data,
    IdxType* out_idx)>;

// User-defined function for picking elements from a range within a row.
//
// The column indices of each element is in
//   off + et_idx[et_offset+i]), where i is in [et_offset, et_offset+et_len)
//
// Similarly, the data indices are stored in
//   data[off+et_idx[et_offset+i])]
// Data index pointer could be NULL, which means data[i] == off+et_idx[et_offset+i])
//
// *ATTENTION*: This function will be invoked concurrently. Please make sure
// it is thread-safe.
//
// \param off Starting offset of this row.
// \param et_offset Starting offset of this range.
// \param et_len Length of the range.
// \param et_idx A map from local idx to column id.
// \param data Pointer of the data indices.
// \param out_idx Picked indices in [et_offset, et_offset + et_len).
template <typename IdxType>
using RangePickFn = std::function<void(
    IdxType off, IdxType et_offset, IdxType et_len,
    const std::vector<IdxType> &et_idx,
    const IdxType* data, IdxType* out_idx)>;

// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
template <typename IdxType>
COOMatrix CSRRowWisePick(CSRMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data);
  const IdxType* indices = static_cast<IdxType*>(mat.indices->data);
  // data may be NULL, in which case the data index of entry i is i itself.
  const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr;
  const IdxType* rows_data = static_cast<IdxType*>(rows->data);
  const int64_t num_rows = rows->shape[0];
  const auto& ctx = mat.indptr->ctx;

  // To leverage OMP parallelization, we create two arrays to store
  // picked src and dst indices. Each array is of length num_rows * num_picks.
  // For rows whose nnz < num_picks, the indices are padded with -1.
  //
  // We check whether all the given rows
  // have at least num_picks number of nnz when replace is false.
  //
  // If the check holds, remove -1 elements by remove_if operation, which simply
  // moves valid elements to the head of arrays and create a view of the original
  // array. The implementation consumes a little extra memory than the actual requirement.
  //
  // Otherwise, directly use the row and col arrays to construct the result COO matrix.
  //
  // [02/29/2020 update]: OMP is disabled for now since batch-wise parallelism is more
  // significant. (minjie)
  IdArray picked_row = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_col = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdArray picked_idx = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
  IdxType* picked_rdata = static_cast<IdxType*>(picked_row->data);
  IdxType* picked_cdata = static_cast<IdxType*>(picked_col->data);
  IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data);

  bool all_has_fanout = true;
#pragma omp parallel for reduction(&&:all_has_fanout)
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdxType rid = rows_data[i];
    const IdxType len = indptr[rid + 1] - indptr[rid];
    // If a node has no neighbor then all_has_fanout must be false even if replace is
    // true.
    all_has_fanout = all_has_fanout && (len >= (replace ? 1 : num_picks));
  }

#pragma omp parallel for
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdxType rid = rows_data[i];
    CHECK_LT(rid, mat.num_rows);
    const IdxType off = indptr[rid];
    const IdxType len = indptr[rid + 1] - off;
    if (len == 0) continue;  // empty row: leave its slots padded with -1

    if (len <= num_picks && !replace) {
      // nnz <= num_picks and w/o replacement, take all nnz
      for (int64_t j = 0; j < len; ++j) {
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[off + j];
        picked_idata[i * num_picks + j] = data? data[off + j] : off + j;
      }
    } else {
      // pick_fn writes positional picks into picked_idata, which are then
      // rewritten in place to the actual data indices below.
      pick_fn(rid, off, len, indices, data, picked_idata + i * num_picks);
      for (int64_t j = 0; j < num_picks; ++j) {
        const IdxType picked = picked_idata[i * num_picks + j];
        picked_rdata[i * num_picks + j] = rid;
        picked_cdata[i * num_picks + j] = indices[picked];
        picked_idata[i * num_picks + j] = data? data[picked] : picked;
      }
    }
  }

  if (!all_has_fanout) {
    // correct the array by remove_if: compact away the -1 padding entries.
    IdxType* new_row_end = std::remove_if(picked_rdata, picked_rdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_col_end = std::remove_if(picked_cdata, picked_cdata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    IdxType* new_idx_end = std::remove_if(picked_idata, picked_idata + num_rows * num_picks,
                                          [] (IdxType i) { return i == -1; });
    const int64_t new_len = (new_row_end - picked_rdata);
    CHECK_EQ(new_col_end - picked_cdata, new_len);
    CHECK_EQ(new_idx_end - picked_idata, new_len);
    picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
    picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
    picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  }

  return COOMatrix(mat.num_rows, mat.num_cols,
                   picked_row, picked_col, picked_idx);
}

// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
template <typename IdxType>
COOMatrix CSRRowWisePerEtypePick(CSRMatrix mat, IdArray rows, IdArray etypes,
                                 int64_t num_picks, bool replace, RangePickFn<IdxType> pick_fn) {
  using namespace aten;
  const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data);
  const IdxType* indices = static_cast<IdxType*>(mat.indices->data);
  const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr;
  const IdxType* rows_data = static_cast<IdxType*>(rows->data);
  // etypes is indexed per non-zero entry (via data index when present).
  const int32_t* etype_data = static_cast<int32_t*>(etypes->data);
  const int64_t num_rows = rows->shape[0];
  const auto& ctx = mat.indptr->ctx;
  CHECK_EQ(etypes->dtype.bits / 8, sizeof(int32_t));

  // Per-row result arrays, concatenated at the end.
  std::vector<IdArray> picked_rows(rows->shape[0]);
  std::vector<IdArray> picked_cols(rows->shape[0]);
  std::vector<IdArray> picked_idxs(rows->shape[0]);

#pragma omp parallel for
  for (int64_t i = 0; i < num_rows; ++i) {
    const IdxType rid = rows_data[i];
    CHECK_LT(rid, mat.num_rows);
    const IdxType off = indptr[rid];
    const IdxType len = indptr[rid + 1] - off;
    // do something here
    if (len == 0) {
      picked_rows[i] = NewIdArray(0, ctx, sizeof(IdxType) * 8);
      picked_cols[i] = NewIdArray(0, ctx, sizeof(IdxType) * 8);
      picked_idxs[i] = NewIdArray(0, ctx, sizeof(IdxType) * 8);
      continue;
    }

    // fast path
    if (len <= num_picks && !replace) {
      // The whole row fits without replacement: take every entry.
      IdArray rows = Full(rid, len, sizeof(IdxType) * 8, ctx);
      IdArray cols = Full(-1, len, sizeof(IdxType) * 8, ctx);
      IdArray idx = Full(-1, len, sizeof(IdxType) * 8, ctx);
      IdxType* cdata = static_cast<IdxType*>(cols->data);
      IdxType* idata = static_cast<IdxType*>(idx->data);
      for (int64_t j = 0; j < len; ++j) {
        cdata[j] = indices[off + j];
        idata[j] = data ? data[off + j] : off + j;
      }
      picked_rows[i] = rows;
      picked_cols[i] = cols;
      picked_idxs[i] = idx;
    } else {
      // need to do per edge type sample
      std::vector<IdxType> rows;
      std::vector<IdxType> cols;
      std::vector<IdxType> idx;

      // Group the row's entries by edge type: sort a permutation of local
      // positions (et_idx) by each entry's edge type (et), then walk the
      // sorted order segment by segment.
      std::vector<IdxType> et(len);
      std::vector<IdxType> et_idx(len);
      std::iota(et_idx.begin(), et_idx.end(), 0);
      for (int64_t j = 0; j < len; ++j) {
        et[j] = data ? etype_data[data[off+j]] : etype_data[off+j];
      }
      std::sort(et_idx.begin(), et_idx.end(),
                [&et](IdxType i1, IdxType i2) {return et[i1] < et[i2];});

      IdxType cur_et = et[et_idx[0]];
      int64_t et_offset = 0;
      int64_t et_len = 1;
      for (int64_t j = 0; j < len; ++j) {
        if ((j+1 == len) || cur_et != et[et_idx[j+1]]) {
          // 1 end of the current etype
          // 2 end of the row
          // random pick for current etype
          if (et_len <= num_picks && !replace) {
            // fast path, select all
            for (int64_t k = 0; k < et_len; ++k) {
              rows.push_back(rid);
              cols.push_back(indices[off+et_idx[et_offset+k]]);
              if (data)
                idx.push_back(data[off+et_idx[et_offset+k]]);
              else
                idx.push_back(off+et_idx[et_offset+k]);
            }
          } else {
            IdArray picked_idx = Full(-1, num_picks, sizeof(IdxType) * 8, ctx);
            IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data);

            // need call random pick
            pick_fn(off, et_offset, et_len, et_idx, data, picked_idata);
            for (int64_t k = 0; k < num_picks; ++k) {
              const IdxType picked = picked_idata[k];
              rows.push_back(rid);
              cols.push_back(indices[off+et_idx[et_offset+picked]]);
              if (data)
                idx.push_back(data[off+et_idx[et_offset+picked]]);
              else
                idx.push_back(off+et_idx[et_offset+picked]);
            }
          }

          if (j+1 == len)
            break;
          // next etype
          cur_et = et[et_idx[j+1]];
          et_offset = j+1;
          et_len = 1;
        } else {
          et_len++;
        }
      }
      picked_rows[i] = VecToIdArray(rows, sizeof(IdxType) * 8, ctx);
      picked_cols[i] = VecToIdArray(cols, sizeof(IdxType) * 8, ctx);
      picked_idxs[i] = VecToIdArray(idx, sizeof(IdxType) * 8, ctx);
    }  // end processing one row

    CHECK_EQ(picked_rows[i]->shape[0], picked_cols[i]->shape[0]);
    CHECK_EQ(picked_rows[i]->shape[0], picked_idxs[i]->shape[0]);
  }  // end processing all rows

  IdArray picked_row = Concat(picked_rows);
  IdArray picked_col = Concat(picked_cols);
  IdArray picked_idx = Concat(picked_idxs);

  return COOMatrix(mat.num_rows, mat.num_cols,
                   picked_row, picked_col, picked_idx);
}

// Template for picking non-zero values row-wise. The implementation first slices
// out the corresponding rows and then converts it to CSR format. It then performs
// row-wise pick on the CSR matrix and rectifies the returned results.
template <typename IdxType>
COOMatrix COORowWisePick(COOMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  const auto& csr = COOToCSR(COOSliceRows(mat, rows));
  const IdArray new_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx);
  const auto& picked = CSRRowWisePick<IdxType>(csr, new_rows, num_picks, replace, pick_fn);
  return COOMatrix(mat.num_rows, mat.num_cols,
                   IndexSelect(rows, picked.row),  // map the row index to the correct one
                   picked.col,
                   picked.data);
}

// Template for picking non-zero values row-wise. The implementation first slices
// out the corresponding rows and then converts it to CSR format. It then performs
// row-wise pick on the CSR matrix and rectifies the returned results.
template <typename IdxType>
COOMatrix COORowWisePerEtypePick(COOMatrix mat, IdArray rows, IdArray etypes,
                                 int64_t num_picks, bool replace, RangePickFn<IdxType> pick_fn) {
  using namespace aten;
  const auto& csr = COOToCSR(COOSliceRows(mat, rows));
  const IdArray new_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx);
  const auto& picked = CSRRowWisePerEtypePick<IdxType>(
      csr, new_rows, etypes, num_picks, replace, pick_fn);
  return COOMatrix(mat.num_rows, mat.num_cols,
                   IndexSelect(rows, picked.row),  // map the row index to the correct one
                   picked.col,
                   picked.data);
}

}  // namespace impl
}  // namespace aten
}  // namespace dgl

#endif  // DGL_ARRAY_CPU_ROWWISE_PICK_H_
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. 
class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>, RegionCodeGenTy>::value> * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<std::remove_reference_t<Callable>>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionOrigs; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals; struct DependData { OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; const Expr *IteratorExpr = nullptr; SmallVector<const Expr *, 4> DepExprs; explicit DependData() = default; DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr) : 
DepKind(DepKind), IteratorExpr(IteratorExpr) {} }; SmallVector<DependData, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. 
/// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. 
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages list of nontemporal decls for the specified directive. 
class UntiedTaskLocalDeclsRAII {
  CodeGenModule &CGM;
  const bool NeedToPush;

public:
  UntiedTaskLocalDeclsRAII(
      CodeGenFunction &CGF,
      const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                            std::pair<Address, Address>> &LocalVars);
  ~UntiedTaskLocalDeclsRAII();
};

/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  LValue IVLVal;
  llvm::Function *Fn = nullptr;
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  ActionToDo Action = ActionToDo::DoNotPush;

  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;

  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};

llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }

protected:
CodeGenModule &CGM;
/// Separators used when composing runtime-internal names (see getName()).
StringRef FirstSeparator, Separator;

/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;

/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                         StringRef Separator);

/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
                                uint64_t Size, int32_t Flags,
                                llvm::GlobalValue::LinkageTypes Linkage);

/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
                                              StringRef ParentName,
                                              llvm::Function *&OutlinedFn,
                                              llvm::Constant *&OutlinedFnID,
                                              bool IsOffloadEntry,
                                              const RegionCodeGenTy &CodeGen);

/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
                                unsigned Flags = 0);

/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
                                              const OMPExecutableDirective &D,
                                              int32_t &DefaultVal);
llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &D);

/// Emit the number of threads for a target directive. Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *
getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
                                    const OMPExecutableDirective &D,
                                    int32_t &DefaultVal);
llvm::Value *
emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D);

/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();

/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);

/// Get the function name of an outlined region.
//  The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }

/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
              llvm::FunctionCallee Callee,
              ArrayRef<llvm::Value *> Args = llvm::None) const;

/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);

void setLocThreadIdInsertPt(CodeGenFunction &CGF,
                            bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);

/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }

/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }

/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);

/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}

/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);

private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
  llvm::Value *DebugLoc;
  llvm::Value *ThreadID;
  /// Insert point for the service instructions.
  llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
    OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
                       std::pair<llvm::Function *, llvm::Function *>>
    UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
                       SmallVector<const OMPDeclareReductionDecl *, 4>>
    FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
    llvm::DenseMap<llvm::Function *,
                   SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
               llvm::DenseMap<CanonicalDeclPtr<const Decl>,
                              std::tuple<QualType, const FieldDecl *,
                                         const FieldDecl *, LValue>>>
    LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
                llvm::BumpPtrAllocator> InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
///    void *              shareds; /**< pointer to block of pointers to
///                                      shared vars */
///    kmp_routine_entry_t routine; /**< pointer to routine to call for
///                                      executing task */
///    kmp_int32           part_id; /**< part id for the task */
///    kmp_routine_entry_t destructors; /* pointer to function to invoke
///                                      destructors of firstprivate C++
///                                      objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
///    kmp_intptr_t base_addr;
///    size_t len;
///    struct {
///      bool in:1;
///      bool out:1;
///    } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
///    kmp_intptr_t base_addr;
///    size_t len;
///    struct {
///      bool flag1 : 1;
///      bool flag2 : 1;
///      kmp_int32 reserved : 30;
///    } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
///   kmp_int64 lo; // lower
///   kmp_int64 up; // upper
///   kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
///   void   *addr;     // Pointer to the offload entry info.
///                     // (function or global)
///   char   *name;     // Name of the function or global.
///   size_t  size;     // Size of the entry info (0 if it a function).
///   int32_t flags;
///   int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;

  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;

  public:
    /// An entry is valid once an emission order has been assigned.
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;

    /// Flags associated with the device global.
    uint32_t Flags = 0u;

    /// Order this entry was emitted.
    unsigned Order = ~0u;

    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }

  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }

    llvm::Constant *getID() const { return ID; }
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum,
                                bool IgnoreAddressId = false) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a to declare target.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a to declare target link.
    OMPTargetGlobalVarEntryLink = 0x1,
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Size of the global variable. (NOTE(review): original comment said
    /// "Type of the global variable", which does not match the CharUnits
    /// member it documents.)
    CharUnits VarSize;
    /// Linkage the variable must be emitted with.
    llvm::GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }

    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);

  /// Register device global variable entry.
  void registerDeviceGlobalVarEntryInfo(StringRef VarName,
                                        llvm::Constant *Addr,
                                        CharUnits VarSize,
                                        OMPTargetGlobalVarEntryKind Flags,
                                        llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
  // Storage for target region entries kind. The storage is to be indexed by
  // file ID, device ID, parent function name and line number.
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;

/// When false, functions used in target regions are not automatically marked
/// as `omp declare target` (see DisableAutoDeclareTargetRAII).
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;

/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;

using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;

using UntiedLocalVarsAddressesMap =
    llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                    std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;

/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;

/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;

/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;

/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the device. (NOTE(review): the upstream
/// comment was left unfinished here.)
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
/// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned,
                                                 bool IsGPUDistribute);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists the type
/// must be the same.
/// \param Name Name of the variable.
llvm::GlobalVariable *getOrCreateInternalVariable(llvm::Type *Ty,
                                                  const llvm::Twine &Name,
                                                  unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
                              llvm::Value *Ctor, llvm::Value *CopyCtor,
                              llvm::Value *Dtor, SourceLocation Loc);

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
                                llvm::Value *Handle, llvm::Value *BasePtr,
                                llvm::Value *Ptr, llvm::Value *Size,
                                llvm::Value *MapType, llvm::Value *MapName,
                                CharUnits ElementSize,
                                llvm::BasicBlock *ExitBB, bool IsInit);

/// Bundle of values produced while emitting a task (see emitTaskInit).
struct TaskResultTy {
  llvm::Value *NewTask = nullptr;
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. 
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
                           CodeGenFunction *CGF = nullptr);

/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);

/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);

/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param NumThreads The value corresponding to the num_threads clause, if /// any, or nullptr. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond, llvm::Value *NumThreads); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). 
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
                                const RegionCodeGenTy &CriticalOpGen,
                                SourceLocation Loc,
                                const Expr *Hint = nullptr);

/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
                              const RegionCodeGenTy &MasterOpGen,
                              SourceLocation Loc);

/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
virtual void emitMaskedRegion(CodeGenFunction &CGF,
                              const RegionCodeGenTy &MaskedOpGen,
                              SourceLocation Loc,
                              const Expr *Filter = nullptr);

/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);

/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
                                 const RegionCodeGenTy &TaskgroupOpGen,
                                 SourceLocation Loc);

/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
                              const RegionCodeGenTy &SingleOpGen,
                              SourceLocation Loc,
                              ArrayRef<const Expr *> CopyprivateVars,
                              ArrayRef<const Expr *> DestExprs,
                              ArrayRef<const Expr *> SrcExprs,
                              ArrayRef<const Expr *> AssignmentOps);

/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
                               const RegionCodeGenTy &OrderedOpGen,
                               SourceLocation Loc, bool IsThreads);

/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. 
/// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. 
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. 
/// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. 
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. 
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           ArrayRef<const Expr *> LHSExprs,
                                           ArrayRef<const Expr *> RHSExprs,
                                           const OMPTaskDataTy &Data);

/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                   bool IsWorksharingReduction);

/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                     ReductionCodeGen &RCG, unsigned N);

/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                     llvm::Value *ReductionsPtr,
                                     LValue SharedLVal);

/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPTaskDataTy &Data);

/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                       SourceLocation Loc,
                                       OpenMPDirectiveKind CancelRegion);

/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
/// virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion); /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. 
virtual void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. virtual bool emitTargetGlobalVariable(GlobalDecl GD); /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr); /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. virtual bool emitTargetGlobal(GlobalDecl GD); /// Creates and returns a registration function for when at least one /// requires directives was used in the current module. llvm::Function *emitRequiresDirectiveRegFun(); /// Creates all the offload entries in the current compilation unit /// along with the associated metadata. void createOffloadEntriesAndInfoMetadata(); /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. 
/// virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars); /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc); /// Struct that keeps all the relevant information that should be kept /// throughout a 'target data' region. class TargetDataInfo { /// Set to true if device pointer information have to be obtained. bool RequiresDevicePointerInfo = false; /// Set to true if Clang emits separate runtime calls for the beginning and /// end of the region. These calls might have separate map type arrays. bool SeparateBeginEndCalls = false; public: /// The array of base pointer passed to the runtime library. llvm::Value *BasePointersArray = nullptr; /// The array of section pointers passed to the runtime library. llvm::Value *PointersArray = nullptr; /// The array of sizes passed to the runtime library. llvm::Value *SizesArray = nullptr; /// The array of map types passed to the runtime library for the beginning /// of the region or for the entire region if there are no separate map /// types for the region end. llvm::Value *MapTypesArray = nullptr; /// The array of map types passed to the runtime library for the end of the /// region, or nullptr if there are no separate map types for the region /// end. llvm::Value *MapTypesArrayEnd = nullptr; /// The array of user-defined mappers passed to the runtime library. 
llvm::Value *MappersArray = nullptr;
/// The array of original declaration names of mapped pointers sent to the
/// runtime library for debugging
llvm::Value *MapNamesArray = nullptr;
/// Indicate whether any user-defined mapper exists.
bool HasMapper = false;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                        bool SeparateBeginEndCalls)
    : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
      SeparateBeginEndCalls(SeparateBeginEndCalls) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
  BasePointersArray = nullptr;
  PointersArray = nullptr;
  SizesArray = nullptr;
  MapTypesArray = nullptr;
  MapTypesArrayEnd = nullptr;
  MapNamesArray = nullptr;
  MappersArray = nullptr;
  HasMapper = false;
  NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
  return BasePointersArray && PointersArray && SizesArray && MapTypesArray &&
         (!HasMapper || MappersArray) && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
};

/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. 
/// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. 
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... /// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... 
shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
    CodeGenFunction &CGF, const OMPExecutableDirective &D,
    const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                   LValue PrivLVal,
                                                   const VarDecl *VD,
                                                   SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
                 ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                 SourceLocation Loc);

/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
                               const OMPTaskDataTy::DependData &Dependencies,
                               SourceLocation Loc);

/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                       SourceLocation Loc);

/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                      OpenMPDependClauseKind NewDepKind, SourceLocation Loc);

/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator, const Expr *AllocatorTraits); /// Destroys user defined allocators specified in the uses_allocators clause. void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator); /// Returns true if the variable is a local variable in untied task. bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const; }; /// Class supports emission of SIMD-only code. class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime { public: explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {} ~CGOpenMPSIMDRuntime() override {} /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. llvm::Function * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param NumThreads The value corresponding to the num_threads clause, if /// any, or nullptr. 
/// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond, llvm::Value *NumThreads) override; /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter = nullptr) override; /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. 
/// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. 
/// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. 
/// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. 
It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. 
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. 
/// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for reduction clause with task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPTaskDataTy &Data) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. 
void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. 
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. 
/// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
DRB070-simd1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* One dimension array computation with a vetorization directive */ #include "omprace.h" #include <omp.h> int a[100], b[100], c[100]; int main() { omprace_init(); int i; #pragma omp simd for (i=0;i<100;i++) a[i]=b[i]*c[i]; omprace_fini(); return 0; }
counters_ww_par.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <errno.h> #include <sys/types.h> #include <memory.h> #include <malloc.h> #include <papi.h> #define SIZE 1000 #define COUNTERS 2 int main(int argc, char **argv) { float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE]; int i,j,k; int events[COUNTERS] = {PAPI_L1_DCM, PAPI_L2_DCM}; //int events[COUNTERS] = {PAPI_L3_TCM, PAPI_FP_OPS}; int ret; long long values[COUNTERS]; //does the system have enough counters in hardware? if (PAPI_num_counters() < COUNTERS) { fprintf(stderr, "No hardware counters here, or PAPI not supported.\n"); exit(1); } //define what each counter should listen to and start them if ((ret = PAPI_start_counters(events, COUNTERS)) != PAPI_OK) { fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret)); exit(1); } /* Initialize the Matrix arrays */ for ( i=0; i<SIZE*SIZE; i++ ){ mresult[0][i] = 0.0; matrixa[0][i] = matrixb[0][i] = rand()*(float)1.1; } /* Matrix-Matrix multiply */ #pragma omp parallel for private (i,j,k) shared (mresult, matrixa, matrixb) schedule (static) for (i=0;i<SIZE;i++) for(j=0;j<SIZE;j++) for(k=0;k<SIZE;k++) mresult[i][j]=mresult[i][j] + matrixa[i][k]*matrixb[k][j]; //read the values from the counters if ((ret = PAPI_read_counters(values, COUNTERS)) != PAPI_OK) { fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret)); exit(1); } //printf("Total hardware flops = %lld\n",values[1]); printf("Total L1 cache misses = %lld\n",values[0]); printf("Total L2 cache misses = %lld\n",values[1]); //printf("Total L3 cache misses = %lld\n",values[0]); exit(0); }
hyantes.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#ifdef USE_FLOAT
typedef float data_t;
#undef M_PI                 /* FIX: <math.h> may already define M_PI; avoid redefinition */
#define M_PI 3.141592654f
#define INPUT_FORMAT "%f%*[ \t]%f%*[ \t]%f"
#define OUTPUT_FORMAT "%f %f %f\n"
#else
typedef double data_t;
#undef M_PI
#define M_PI 3.141592654
#define INPUT_FORMAT "%lf%*[ \t]%lf%*[ \t]%lf"
#define OUTPUT_FORMAT "%lf %lf %lf\n"
#endif

/* One town record: [0], [1] are angular coordinates (converted to radians
 * on input), [2] is a weight/population value — TODO confirm semantics
 * against the input-file documentation. */
typedef data_t town[3];

typedef struct {
  size_t nb;      /* number of valid entries in data */
  town  *data;    /* heap array of nb towns; owned by the holder */
} towns;

/*
 * Parse whitespace-separated triples from the file fname into a towns
 * array.  Malformed lines are skipped; the first two fields of each valid
 * record are converted from degrees to radians.  The returned .data is
 * heap-allocated and owned by the caller.
 */
towns read_towns(const char fname[])
{
  FILE *fd = fopen(fname,"r");
  size_t curr = 0;
  char c;
  towns the_towns = { 1, malloc(sizeof(town)) };

  /* FIX: the original dereferenced a NULL stream when the file could not
   * be opened; fail loudly instead. */
  if (!fd) {
    perror(fname);
    exit(EXIT_FAILURE);
  }

  while (!feof(fd)) {
    /* grow the array geometrically when full */
    if (the_towns.nb == curr) {
      the_towns.nb *= 2;
      the_towns.data = realloc(the_towns.data, the_towns.nb*sizeof(town));
    }
    if (fscanf(fd, INPUT_FORMAT,
               &the_towns.data[curr][0],
               &the_towns.data[curr][1],
               &the_towns.data[curr][2]) != 3) {
      /* malformed record: discard the rest of the line */
      while (!feof(fd)) {
        c = (char)fgetc(fd);
        if (c=='\n' || c=='\r') break;
      }
    } else {
      the_towns.data[curr][0] *= M_PI/180;   /* degrees -> radians */
      the_towns.data[curr][1] *= M_PI/180;
      ++curr;
    }
  }
  fclose(fd);

  /* shrink to the number of records actually read */
  the_towns.data = realloc(the_towns.data, curr*sizeof(town));
  the_towns.nb = curr;
  return the_towns;
}

/*
 * Evaluate a smoothed potential on a (rangex x rangey) grid covering
 * [xmin,xmax) x [ymin,ymax) with the given step (all in radians).  For
 * each grid point, towns within `range` km (great-circle distance, Earth
 * radius 6368 km) contribute weight/(1+distance).  Grid coordinates in
 * the result are converted back to degrees.  The returned .data is
 * heap-allocated and owned by the caller.
 */
towns run(towns t, data_t xmin, data_t ymin, data_t xmax, data_t ymax,
          data_t step, data_t range)
{
  size_t i,j;
  size_t k;
  size_t rangex = ( (xmax - xmin )/step ),
         rangey = ( (ymax - ymin )/step );
  int nb = 1 + rangex*rangey;
  towns tout = { nb, malloc(sizeof(town)*nb) };   /* fix: stray ';;' removed */

  /* each (i,j) writes a distinct grid cell, so the loop is race-free;
   * k is the (private) inner reduction index over towns */
#pragma omp parallel for private(i,j,k)
  for (i=0;i<rangex;i++)
    for (j=0;j<rangey;j++) {
      tout.data[i*rangey+j][0] = (xmin+step*i)*180/M_PI;
      tout.data[i*rangey+j][1] = (ymin+step*j)*180/M_PI;
      tout.data[i*rangey+j][2] = 0.;
      for (k=0;k<t.nb;k++) {
        /* great-circle distance from grid point to town k */
        data_t tmp = 6368.*
          acos(cos(xmin+step*i)*cos( t.data[k][0] )
               * cos((ymin+step*j)-t.data[k][1])
               + sin(xmin+step*i)*sin(t.data[k][0]));
        if ( tmp < range )
          tout.data[i*rangey+j][2] += t.data[k][2] / (1 + tmp) ;
      }
    }
  return tout;
}

/*
 * Dump the grid produced by run() to "out.dat", one blank-line-separated
 * row per i.  Uses the same rangex/rangey derivation as run() so indices
 * line up.
 */
void display(towns t, data_t xmin, data_t ymin, data_t xmax, data_t ymax,
             data_t step)
{
  size_t rangex = ( (xmax - xmin )/step ),
         rangey = ( (ymax - ymin )/step );
  size_t i,j;
  FILE *fd = fopen("out.dat","w");

  /* FIX: the original neither checked fopen() nor closed the stream,
   * leaking the FILE and possibly losing buffered output. */
  if (!fd) {
    perror("out.dat");
    exit(EXIT_FAILURE);
  }
  for (i=0;i<rangex;i++) {
    for (j=0;j<rangey;j++)
      fprintf(fd, OUTPUT_FORMAT,
              t.data[i*rangey+j][0],
              t.data[i*rangey+j][1],
              t.data[i*rangey+j][2]);
    fprintf(fd, "\n");
  }
  fclose(fd);
}

/*
 * Usage: hyantes <towns-file> <xmin> <ymin> <xmax> <ymax> <step> <range>
 * Angles are given in degrees (converted to radians here); range is in km.
 * Prints the elapsed wall-clock time in seconds.
 */
int main(int argc, char * argv[])
{
  double start = omp_get_wtime();
  if (argc != 8)
    return 1;
  {
    towns t = read_towns(argv[1]);
    data_t xmin  = atof(argv[2])*M_PI/180.,
           ymin  = atof(argv[3])*M_PI/180.,
           xmax  = atof(argv[4])*M_PI/180.,
           ymax  = atof(argv[5])*M_PI/180.,
           step  = atof(argv[6])*M_PI/180.,
           range = atof(argv[7]);
    towns out = run(t,xmin,ymin,xmax,ymax,step,range);
    display(out,xmin,ymin,xmax,ymax,step);
  }
  double stop = omp_get_wtime();
  printf("%lf\n", stop -start);
  return 0;
}
/* ==== file: par_csr_matrix.c ==== */
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* In addition to publically accessible interface in HYPRE_mv.h, the implementation in this file uses accessor macros into the sequential matrix structure, and so includes the .h that defines that structure. Should those accessor functions become proper functions at some later date, this will not be necessary. 
AJC 4/99 */ #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); #endif /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* If create is called for HYPRE_NO_GLOBAL_PARTITION and row_starts and col_starts are NOT null, then it is assumed that they are array of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCreate( MPI_Comm comm, HYPRE_Int global_num_rows, HYPRE_Int global_num_cols, HYPRE_Int *row_starts, HYPRE_Int *col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd ) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_Int first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); if (!row_starts) { #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); #else hypre_GeneratePartitioning(global_num_rows, num_procs, &row_starts); #endif } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); #else hypre_GeneratePartitioning(global_num_cols, num_procs, &col_starts); #endif } } #ifdef HYPRE_NO_GLOBAL_PARTITION /* row_starts[0] is start of local rows. 
row_starts[1] is start of next processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1]-first_row_index ; first_col_diag = col_starts[0]; local_num_cols = col_starts[1]-first_col_diag; #else first_row_index = row_starts[my_id]; local_num_rows = row_starts[my_id+1]-first_row_index; first_col_diag = col_starts[my_id]; local_num_cols = col_starts[my_id+1]-first_col_diag; #endif hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols,num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd,num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; /* When NO_GLOBAL_PARTITION is set we could make these null, instead of leaving the range. 
If that change is made, then when this create is called from functions like the matrix-matrix multiply, be careful not to generate a new partition */ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix ) { if (matrix) { if ( hypre_ParCSRMatrixOwnsData(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if ( hypre_ParCSRMatrixDiagT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if ( hypre_ParCSRMatrixOffdT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix)); if (hypre_ParCSRMatrixCommPkg(matrix)) hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); if (hypre_ParCSRMatrixCommPkgT(matrix)) hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) ) hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix)); if ( hypre_ParCSRMatrixOwnsColStarts(matrix) ) hypre_TFree(hypre_ParCSRMatrixColStarts(matrix)); hypre_TFree(hypre_ParCSRMatrixRowindices(matrix)); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix)); if (hypre_ParCSRMatrixAssumedPartition(matrix)) 
hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); hypre_TFree(matrix); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_CSRMatrixInitialize(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixInitialize(hypre_ParCSRMatrixOffd(matrix)); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_Int,hypre_CSRMatrixNumCols( hypre_ParCSRMatrixOffd(matrix))); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix ) { MPI_Comm comm; hypre_CSRMatrix *diag; HYPRE_Int *diag_i; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int local_num_rows; HYPRE_Int total_num_nonzeros; HYPRE_Int local_num_nonzeros; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); diag_i = hypre_CSRMatrixI(diag); offd = hypre_ParCSRMatrixOffd(matrix); offd_i = hypre_CSRMatrixI(offd); local_num_rows = hypre_CSRMatrixNumRows(diag); local_num_nonzeros = diag_i[local_num_rows] + offd_i[local_num_rows]; hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix ) { MPI_Comm comm; 
hypre_CSRMatrix *diag; HYPRE_Int *diag_i; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int local_num_rows; HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); diag_i = hypre_CSRMatrixI(diag); offd = hypre_ParCSRMatrixOffd(matrix); offd_i = hypre_CSRMatrixI(offd); local_num_rows = hypre_CSRMatrixNumRows(diag); local_num_nonzeros = (HYPRE_Real) diag_i[local_num_rows] + (HYPRE_Real) offd_i[local_num_rows]; hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_data ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_row_starts ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_col_starts ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } 
hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead( MPI_Comm comm, const char *file_name ) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_Int global_num_rows, global_num_cols, num_cols_offd; HYPRE_Int local_num_rows; HYPRE_Int *row_starts; HYPRE_Int *col_starts; HYPRE_Int *col_map_offd; FILE *fp; HYPRE_Int equal = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int row_s, row_e, col_s, col_e; #endif hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); #ifdef HYPRE_NO_GLOBAL_PARTITION row_starts = hypre_CTAlloc(HYPRE_Int, 2); col_starts = hypre_CTAlloc(HYPRE_Int, 2); #else row_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1); col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1); #endif hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%d", &global_num_rows); hypre_fscanf(fp, "%d", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* the bgl input file should only contain the EXACT range for local processor */ hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; #else for (i=0; i < num_procs; i++) hypre_fscanf(fp, "%d %d", &row_starts[i], &col_starts[i]); row_starts[num_procs] = global_num_rows; col_starts[num_procs] = global_num_cols; #endif col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); for (i=0; i < num_cols_offd; i++) hypre_fscanf(fp, "%d", &col_map_offd[i]); 
fclose(fp); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } #else for (i=num_procs; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } #endif if (equal) { hypre_TFree(col_starts); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows,0,0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; #else hypre_ParCSRMatrixFirstRowIndex(matrix) = row_starts[my_id]; hypre_ParCSRMatrixFirstColDiag(matrix) = col_starts[my_id]; hypre_ParCSRMatrixLastRowIndex(matrix) = row_starts[my_id+1]-1; hypre_ParCSRMatrixLastColDiag(matrix) = col_starts[my_id+1]-1; #endif hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; else hypre_ParCSRMatrixColMapOffd(matrix) = NULL; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix, const char *file_name ) { MPI_Comm comm; HYPRE_Int global_num_rows; HYPRE_Int global_num_cols; HYPRE_Int *col_map_offd; #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int *row_starts; HYPRE_Int *col_starts; #endif HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int row_s, row_e, col_s, col_e; #endif if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); #ifndef HYPRE_NO_GLOBAL_PARTITION row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); #endif if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%d\n", global_num_rows); hypre_fprintf(fp, "%d\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%d %d %d %d\n", row_s, row_e + 1, col_s, col_e + 1); #else for 
(i=0; i < num_procs; i++) hypre_fprintf(fp, "%d %d\n", row_starts[i], col_starts[i]); #endif for (i=0; i < num_cols_offd; i++) hypre_fprintf(fp, "%d\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_Int first_row_index; HYPRE_Int first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int *col_map_offd; HYPRE_Int num_rows; HYPRE_Int *row_starts; HYPRE_Int *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j, I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_Int ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if 
(num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } #ifdef HYPRE_NO_GLOBAL_PARTITION ilower = row_starts[0]+base_i; iupper = row_starts[1]+base_i - 1; jlower = col_starts[0]+base_j; jupper = col_starts[1]+base_j - 1; #else ilower = row_starts[myid] +base_i; iupper = row_starts[myid+1]+base_i - 1; jlower = col_starts[myid] +base_j; jupper = col_starts[myid+1]+base_j - 1; #endif hypre_fprintf(file, "%d %d %d %d\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + i + base_i; /* print diag columns */ for (j = diag_i[i]; j < diag_i[i+1]; j++) { J = first_col_diag + diag_j[j] + base_j; if ( diag_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%d %d %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%d %d %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%d %d\n", I, J); } /* print offd columns */ if ( num_nonzeros_offd ) { for (j = offd_i[i]; j < offd_i[i+1]; j++) { J = col_map_offd[offd_j[j]] + base_j; if ( offd_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%d %d %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%d %d %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%d %d\n", I, J ); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_Int global_num_rows; HYPRE_Int global_num_cols; HYPRE_Int first_row_index; HYPRE_Int first_col_diag; HYPRE_Int last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int *col_map_offd; HYPRE_Int *row_starts; HYPRE_Int *col_starts; 
HYPRE_Int num_rows; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int *aux_offd_j; HYPRE_Int myid, num_procs, i, j, I, J; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%d %d", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1); col_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%d %d", &row_starts[i], &col_starts[i]); base_i = row_starts[0]; base_j = col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= base_i; col_starts[i] -= base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); 
last_col_diag = first_col_diag+num_cols-1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%d %d %le", &I, &J, &data); I = I-base_i-first_row_index; J -= base_j; if (I > row_cnt) { diag_i[I] = diag_cnt; offd_i[I] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { offd_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = J - first_col_diag; diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd); for (i=0; i < num_nonzeros_offd; i++) aux_offd_j[i] = offd_j[i]; hypre_qsort0(aux_offd_j,0,num_nonzeros_offd-1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i=1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i=0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BinarySearch(col_map_offd, offd_j[i], num_cols_offd); } hypre_TFree(aux_offd_j); } /* move diagonal element in first position in each row */ for (i=0; i < num_rows; i++) { i_col = diag_i[i]; for (j=i_col; j < diag_i[i+1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix, HYPRE_Int *row_start, HYPRE_Int *row_end, HYPRE_Int *col_start, HYPRE_Int *col_end ) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id ); #ifdef HYPRE_NO_GLOBAL_PARTITION *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); #else *row_start = hypre_ParCSRMatrixRowStarts(matrix)[ my_id ]; *row_end = hypre_ParCSRMatrixRowStarts(matrix)[ my_id + 1 ]-1; *col_start = hypre_ParCSRMatrixColStarts(matrix)[ my_id ]; *col_end = hypre_ParCSRMatrixColStarts(matrix)[ my_id + 1 ]-1; #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat, HYPRE_Int row, HYPRE_Int *size, HYPRE_Int **col_ind, HYPRE_Complex **values ) { HYPRE_Int my_id; HYPRE_Int row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) return(-1); hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id ); hypre_ParCSRMatrixGetrowactive(mat) = 1; #ifdef HYPRE_NO_GLOBAL_PARTITION row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; #else row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ]; row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ]; #endif if (row < row_start || row >= row_end) return(-1); /* if buffer is not allocated and some information is requested, allocate buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values )) { /* allocate enough space to hold information from the longest row. 
*/ HYPRE_Int max = 1,tmp; HYPRE_Int i; HYPRE_Int m = row_end-row_start; for ( i=0; i<m; i++ ) { tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc ( HYPRE_Complex, max ); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_Int *) hypre_CTAlloc ( HYPRE_Int, max ); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_Int cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow=row-row_start; HYPRE_Int *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow+1]-hypre_CSRMatrixI(Aa)[lrow]; cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); nzB = hypre_CSRMatrixI(Ba)[lrow+1]-hypre_CSRMatrixI(Ba)[lrow]; cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* Sort by increasing column numbers, assuming A and B already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for ( i=0; i<nzB; i++ ) { if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i]; else break; } imark = i; for ( i=0; i<nzA; i++ ) v_p[imark+i] = vworkA[i]; for ( i=imark; i<nzB; i++ ) v_p[nzA+i] = vworkB[i]; } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for ( i=0; i<imark; i++ ) { idx_p[i] = cmap[cworkB[i]]; } } else { for ( i=0; i<nzB; i++ ) { if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]]; else break; } imark = i; } for ( i=0; i<nzA; i++ ) idx_p[imark+i] = cstart + cworkA[i]; for ( i=imark; i<nzB; i++ ) idx_p[nzA+i] = cmap[cworkB[i]]; } } else { if (col_ind) *col_ind = 0; if (values) *values = 0; } } *size = nztot; } 
/* End of copy */ return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix, HYPRE_Int row, HYPRE_Int *size, HYPRE_Int **col_ind, HYPRE_Complex **values ) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix)=0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . * * This shouldn't be used with the HYPRE_NO_GLOBAL_PARTITON option * *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm, hypre_CSRMatrix *A, HYPRE_Int *row_starts, HYPRE_Int *col_starts ) { HYPRE_Int *global_data; HYPRE_Int global_size; HYPRE_Int global_num_rows; HYPRE_Int global_num_cols; HYPRE_Int *local_num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int *local_num_nonzeros=NULL; HYPRE_Int num_nonzeros; HYPRE_Complex *a_data; HYPRE_Int *a_i; HYPRE_Int *a_j; hypre_CSRMatrix *local_A; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; hypre_ParCSRMatrix *par_matrix; HYPRE_Int first_col_diag; HYPRE_Int last_col_diag; HYPRE_Int i, j, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); global_data = hypre_CTAlloc(HYPRE_Int, 2*num_procs+6); if (my_id == 0) { global_size = 3; if (row_starts) { if (col_starts) { if (col_starts != row_starts) { /* contains code for what to expect, if 0: row_starts = col_starts, only row_starts given if 1: only row_starts given, col_starts = NULL if 2: both row_starts and col_starts given if 3: only col_starts given, 
row_starts = NULL */ global_data[3] = 2; global_size = 2*num_procs+6; for (i=0; i < num_procs+1; i++) global_data[i+4] = row_starts[i]; for (i=0; i < num_procs+1; i++) global_data[i+num_procs+5] = col_starts[i]; } else { global_data[3] = 0; global_size = num_procs+5; for (i=0; i < num_procs+1; i++) global_data[i+4] = row_starts[i]; } } else { global_data[3] = 1; global_size = num_procs+5; for (i=0; i < num_procs+1; i++) global_data[i+4] = row_starts[i]; } } else { if (col_starts) { global_data[3] = 3; global_size = num_procs+5; for (i=0; i < num_procs+1; i++) global_data[i+4] = col_starts[i]; } } global_data[0] = hypre_CSRMatrixNumRows(A); global_data[1] = hypre_CSRMatrixNumCols(A); global_data[2] = global_size; a_data = hypre_CSRMatrixData(A); a_i = hypre_CSRMatrixI(A); a_j = hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data,3,HYPRE_MPI_INT,0,comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { hypre_MPI_Bcast(&global_data[3],global_size-3,HYPRE_MPI_INT,0,comm); if (my_id > 0) { if (global_data[3] < 3) { row_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1); for (i=0; i< num_procs+1; i++) { row_starts[i] = global_data[i+4]; } if (global_data[3] == 0) col_starts = row_starts; if (global_data[3] == 2) { col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1); for (i=0; i < num_procs+1; i++) { col_starts[i] = global_data[i+num_procs+5]; } } } else { col_starts = hypre_CTAlloc(HYPRE_Int, num_procs+1); for (i=0; i< num_procs+1; i++) { col_starts[i] = global_data[i+4]; } } } } hypre_TFree(global_data); local_num_rows = hypre_CTAlloc(HYPRE_Int, num_procs); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs); par_matrix = hypre_ParCSRMatrixCreate( comm, global_num_rows, global_num_cols,row_starts,col_starts,0,0,0); row_starts = hypre_ParCSRMatrixRowStarts(par_matrix); col_starts = hypre_ParCSRMatrixColStarts(par_matrix); for (i=0; i < num_procs; i++) local_num_rows[i] = row_starts[i+1] - 
row_starts[i]; if (my_id == 0) { local_num_nonzeros = hypre_CTAlloc(HYPRE_Int, num_procs); for (i=0; i < num_procs-1; i++) local_num_nonzeros[i] = a_i[row_starts[i+1]] - a_i[row_starts[i]]; local_num_nonzeros[num_procs-1] = a_i[global_num_rows] - a_i[row_starts[num_procs-1]]; } hypre_MPI_Scatter(local_num_nonzeros,1,HYPRE_MPI_INT,&num_nonzeros,1, HYPRE_MPI_INT,0,comm); if (my_id == 0) num_nonzeros = local_num_nonzeros[0]; local_A = hypre_CSRMatrixCreate(local_num_rows[my_id], global_num_cols, num_nonzeros); if (my_id == 0) { requests = hypre_CTAlloc (hypre_MPI_Request, num_procs-1); status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1); j=0; for (i=1; i < num_procs; i++) { ind = a_i[row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(local_num_nonzeros[i], local_num_rows[i], &a_data[ind], &a_i[row_starts[i]], &a_j[ind], &csr_matrix_datatypes[i]); hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[j++]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = a_data; hypre_CSRMatrixI(local_A) = a_i; hypre_CSRMatrixJ(local_A) = a_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs-1,requests,status); hypre_TFree(requests); hypre_TFree(status); hypre_TFree(local_num_nonzeros); } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, local_num_rows[my_id], hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), csr_matrix_datatypes); hypre_MPI_Recv(hypre_MPI_BOTTOM,1,csr_matrix_datatypes[0],0,0,comm,&status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = col_starts[my_id]; last_col_diag = col_starts[my_id+1]-1; GenerateDiagAndOffd(local_A, par_matrix, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(local_num_rows); 
hypre_TFree(csr_matrix_datatypes); return par_matrix; } HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix *A, hypre_ParCSRMatrix *matrix, HYPRE_Int first_col_diag, HYPRE_Int last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_Int *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt; HYPRE_Int counter; num_cols_diag = last_col_diag - first_col_diag +1; num_cols_offd = 0; if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize(diag); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize(offd); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int,num_cols); for (i=0; i < num_cols; i++) marker[i] = 0; jo = 0; jd = 0; for (i=0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_Int,num_cols_offd); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i=0; i < num_cols; i++) if (marker[i]) { col_map_offd[counter] = i; marker[i] = counter; counter++; } hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); 
offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i=0; i < num_rows; i++) { for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = a_j[j]-first_col_diag; } } hypre_TFree(marker); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i=0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1); for (i=0; i < num_rows+1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_Int num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros); hypre_CSRMatrixInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = 
hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows/num_threads; rest = num_rows - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii=0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } count = diag_i[ns]+offd_i[ns];; for (i=ns; i < ne; i++) { matrix_i[i] = count; for (j=diag_i[i]; j < diag_i[i+1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = diag_j[j]+first_col_diag; } for (j=offd_i[i]; j < offd_i[i+1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(par_matrix); #endif HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int 
*used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; #endif hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION local_num_rows = hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1; local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* determine procs that have vector data and store their ids in used_procs */ /* we need to do an exchange data for this. 
If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_Int, send_proc_obj.element_storage_length); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1); for (i=1; i<= num_types; 
i++) { used_procs[i-1] = send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts); hypre_TFree(send_proc_obj.id); hypre_TFree(send_proc_obj.elements); if(response_recv_buf) hypre_TFree(response_recv_buf); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types); status = hypre_CTAlloc(hypre_MPI_Status, num_types); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status); hypre_TFree(requests); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts); hypre_TFree(send_proc_obj.id); hypre_TFree(send_proc_obj.elements); hypre_TFree(send_info); if(response_recv_buf) hypre_TFree(response_recv_buf); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts); /* now proc 0 can exit if it has no rows */ if 
(!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix); hypre_TFree(new_vec_starts); hypre_TFree(used_procs); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1); num_requests = 4*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests); status = hypre_CTAlloc(hypre_MPI_Status, num_requests); /* exchange contents of local_matrix_i - here we are sending to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = new_vec_starts[i+1] - new_vec_starts[i]; hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering?*/ offset = matrix_i[new_vec_starts[1]]; for (i=1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[new_vec_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* generate datatypes for further data exchange and exchange remaining data, i.e. 
column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[new_vec_starts[i]]; num_data = matrix_i[new_vec_starts[i+1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i=0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts); #else local_num_rows = row_starts[my_id+1] - row_starts[my_id]; /* if my_id contains no data, return NULL */ if (!local_num_rows) return NULL; local_matrix = hypre_MergeDiagAndOffd(par_matrix); local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1); /* determine procs that have vector data and store their ids in used_procs */ num_types = 0; for (i=0; i < num_procs; i++) if (row_starts[i+1]-row_starts[i] && i-my_id) num_types++; num_requests = 4*num_types; used_procs = hypre_CTAlloc(HYPRE_Int, num_types); j = 0; for (i=0; i < num_procs; i++) if (row_starts[i+1]-row_starts[i] && i-my_id) used_procs[j++] = i; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests); status = hypre_CTAlloc(hypre_MPI_Status, num_requests); /* data_type = hypre_CTAlloc(hypre_MPI_Datatype, num_types+1); */ /* exchange contents of local_matrix_i */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = row_starts[proc_id+1] - row_starts[proc_id]; hypre_MPI_Irecv(&matrix_i[row_starts[proc_id]+1], vec_len, HYPRE_MPI_INT, 
proc_id, 0, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, 0, comm, &requests[j++]); } vec_len = row_starts[my_id+1] - row_starts[my_id]; for (i=1; i <= vec_len; i++) matrix_i[row_starts[my_id]+i] = local_matrix_i[i]; hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ offset = matrix_i[row_starts[1]]; for (i=1; i < num_procs; i++) { for (j = row_starts[i]; j < row_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[row_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* generate datatypes for further data exchange and exchange remaining data, i.e. column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[row_starts[proc_id]]; num_data = matrix_i[row_starts[proc_id+1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], 0, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], 0, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i=0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], 0, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], 0, comm, &requests[j++]); } start_index = matrix_i[row_starts[my_id]]; for (i=0; i < local_num_nonzeros; i++) { matrix_j[start_index+i] = local_matrix_j[i]; matrix_data[start_index+i] = local_matrix_data[i]; } hypre_MPI_Waitall(num_requests, requests, status); start_index = matrix_i[row_starts[my_id]]; for (i=0; i < local_num_nonzeros; i++) { matrix_j[start_index+i] 
= local_matrix_j[i]; matrix_data[start_index+i] = local_matrix_data[i]; } hypre_MPI_Waitall(num_requests, requests, status); #endif if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix); if (num_requests) { hypre_TFree(requests); hypre_TFree(status); hypre_TFree(used_procs); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Int copy_data ) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_Int *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_Int *col_map_offd_B; HYPRE_Int num_cols_offd; HYPRE_Int i; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); if (num_cols_offd && col_map_offd_B == NULL) { col_map_offd_B = hypre_CTAlloc(HYPRE_Int,num_cols_offd); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } for (i = 0; i < num_cols_offd; i++) col_map_offd_B[i] = col_map_offd_A[i]; return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange 
*--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
                                  HYPRE_Int contact_size,
                                  HYPRE_Int contact_proc,
                                  void *ro,
                                  MPI_Comm comm,
                                  void **p_send_response_buf,
                                  HYPRE_Int *response_message_size )
{
   /* Response callback invoked by hypre_DataExchangeList: appends the
      contacting processor's id and its contact_size integers of payload
      into the hypre_ProcListElements object stored in response_obj->data2,
      growing the id/vec_starts/elements arrays as needed.
      No reply message is produced (*response_message_size is set to 0);
      p_send_response_buf is unused. */
   HYPRE_Int    myid;
   HYPRE_Int    i, index, count, elength;

   HYPRE_Int    *recv_contact_buf = (HYPRE_Int * ) p_recv_contact_buf;

   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*)ro;

   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;

   hypre_MPI_Comm_rank(comm, &myid );

   /*check to see if we need to allocate more space in send_proc_obj for ids*/
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
      send_proc_obj->id =
         hypre_TReAlloc(send_proc_obj->id,HYPRE_Int,
                        send_proc_obj->storage_length);
      /* vec_starts has one more slot than id (prefix-sum array) */
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts,HYPRE_Int,
                        send_proc_obj->storage_length + 1);
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/

   /*send proc*/
   send_proc_obj->id[count] = contact_proc;

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      /* grow by at least 10 beyond the current element count */
      elength = hypre_max(contact_size, 10);
      elength += index;
      send_proc_obj->elements =
         hypre_TReAlloc(send_proc_obj->elements, HYPRE_Int, elength);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   for (i=0; i< contact_size; i++)
   {
      send_proc_obj->elements[index++] = recv_contact_buf[i];
   }
   send_proc_obj->vec_starts[count+1] = index;
   send_proc_obj->length++;

   /*output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCompleteClone
 * Creates and returns a new copy of the argument, A.
* Data is not copied, only structural information is reproduced. * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ /* This differs from Hypre_ParCSRMatrixClone in parcsr_ls/par_gsmg.c, because that Clone function makes a matrix with different global parameters. */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCompleteClone( hypre_ParCSRMatrix * A ) { hypre_ParCSRMatrix * B = hypre_CTAlloc(hypre_ParCSRMatrix, 1); HYPRE_Int i, ncols_offd; hypre_ParCSRMatrixComm( B ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( B ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( B ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( B ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_ParCSRMatrixFirstColDiag( B ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( B ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( B ) = hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( B ) = hypre_CSRMatrixClone( hypre_ParCSRMatrixDiag( A ) ); hypre_ParCSRMatrixOffd( B ) = hypre_CSRMatrixClone( hypre_ParCSRMatrixOffd( A ) ); hypre_ParCSRMatrixRowStarts( B ) = hypre_ParCSRMatrixRowStarts( A ); hypre_ParCSRMatrixColStarts( B ) = hypre_ParCSRMatrixColStarts( A ); /* note that B doesn't own row_starts & col_starts; this isn't a full copy */ hypre_ParCSRMatrixCommPkg( B ) = NULL; hypre_ParCSRMatrixCommPkgT( B ) = NULL; hypre_ParCSRMatrixOwnsData( B ) = 1; hypre_ParCSRMatrixOwnsRowStarts( B ) = 0; hypre_ParCSRMatrixOwnsColStarts( B ) = 0; hypre_ParCSRMatrixNumNonzeros( B ) = hypre_ParCSRMatrixNumNonzeros( A ); hypre_ParCSRMatrixDNumNonzeros( B ) = hypre_ParCSRMatrixNumNonzeros( A ); hypre_ParCSRMatrixRowindices( B ) = NULL; hypre_ParCSRMatrixRowvalues( B ) = NULL; hypre_ParCSRMatrixGetrowactive( B ) = 0; ncols_offd = hypre_CSRMatrixNumCols( 
hypre_ParCSRMatrixOffd( B ) ); hypre_ParCSRMatrixColMapOffd( B ) = hypre_CTAlloc( HYPRE_Int, ncols_offd ); for ( i=0; i<ncols_offd; ++i ) hypre_ParCSRMatrixColMapOffd( B )[i] = hypre_ParCSRMatrixColMapOffd( A )[i]; return B; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. * A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B ) { hypre_ParCSRMatrix * C; HYPRE_Int * col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm( A ); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 ); hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B ) == hypre_ParCSRMatrixFirstRowIndex( A ) ); hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A ); hypre_ParCSRMatrixOwnsRowStarts( C ) = 0; hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A ); hypre_ParCSRMatrixOwnsColStarts( C ) = 0; for ( p=0; p<=num_procs; ++p ) hypre_assert( hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B) ); hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( C ) = 
hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0 ); hypre_ParCSRMatrixOffd( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C ); hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C; hypre_ParCSRMatrixCommPkg( C ) = NULL; hypre_ParCSRMatrixCommPkgT( C ) = NULL; hypre_ParCSRMatrixOwnsData( C ) = 1; /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. I suspect, but don't know, that other parts of hypre do not assume that the correct values have been set. hypre_ParCSRMatrixSetNumNonzeros( C ); hypre_ParCSRMatrixSetDNumNonzeros( C );*/ hypre_ParCSRMatrixNumNonzeros( C ) = 0; hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0; hypre_ParCSRMatrixRowindices( C ) = NULL; hypre_ParCSRMatrixRowvalues( C ) = NULL; hypre_ParCSRMatrixGetrowactive( C ) = 0; return C; }
EDT.h
#ifndef EDT_INCLUDED
#define EDT_INCLUDED
#include <omp.h>
#include "../SignalProcessing/CubeGrid.h"

// Squared Euclidean distance transform of a binary voxel rasterization,
// computed by three independent separable axis scans (Real-valued wrapper).
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads=1 );
// Integer workhorse: exact squared distances.
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads=1 );
// Gaussian of the squared EDT: exp( -d^2 / (2*fallOff^2) ).
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff=Real(sqrt(8.)) , int threads=1 );

///////////////////////////////
// Rasterization definitions //
///////////////////////////////

// Compute the integer squared EDT and cast it element-wise into Real.
template< class Real >
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& edt , int threads )
{
	int res = rasterization.resolution();
	CubeGrid< int > _edt;
	SquaredEDT( rasterization , _edt , threads );
	edt.resize( res );
	const int* _edtPtr = _edt[0];
	Real *edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
	for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = Real( _edtPtr[i] );
}

// Map squared distances through a Gaussian fall-off, in place over gedt.
template< class Real >
void GaussianEDT( const CubeGrid< char >& rasterization , CubeGrid< Real >& gedt , Real fallOff , int threads )
{
	int res = rasterization.resolution();
	SquaredEDT( rasterization , gedt , threads );
	Real* _gedt = gedt[0];
	// Precompute the denominator 2*fallOff^2 of the Gaussian exponent.
	fallOff = Real(2.)*fallOff*fallOff;
#pragma omp parallel for num_threads( threads )
	for( int i=0 ; i<res*res*res; i++ ) _gedt[i] = Real( exp( - _gedt[i] / fallOff) );
}

// Exact squared EDT via three 1-D passes (z, then y, then x), each pass
// combining squared orthogonal distance from the previous pass with squared
// parallel distance along the current axis.
void SquaredEDT( const CubeGrid< char >& rasterization , CubeGrid< int >& edt , int threads )
{
	threads = std::max< int >( threads , 1 );
	int res = rasterization.resolution();
	if( !res )
	{
		fprintf( stderr , "[WARNING] Cannot compute distance transform of zero resolution rasterization\n" );
		return;
	}
	edt.resize( rasterization.resolution() );
	// Per-thread scratch rows (old = input of a 1-D pass, new = its output).
	std::vector< int* > oldBuffer( threads ) , newBuffer( threads );
	for( int i=0 ; i<threads ; i++ ) oldBuffer[i] = new int[res] , newBuffer[i] = new int[res];
	// Set the upper bound on the distance values
	// (3*(res+1)^2 exceeds any achievable squared distance in the cube).
	{
		int* edtPtr = edt[0];
#pragma omp parallel for num_threads( threads )
		for( int i=0 ; i<res*res*res ; i++ ) edtPtr[i] = 3 * (res+1) * (res+1);
	}
	// scan along z axis: two sweeps of simple run-length distance to the
	// nearest occupied voxel in the same (x,y) column.
	#pragma omp parallel for num_threads( threads )
	for( int xy=0 ; xy<res*res ; xy++ )
	{
		int x = xy/res , y = xy%res;
		bool first=true;   // true until the first occupied voxel is seen
		int dist = 0;
		int* edtPtr = edt[x] + y*res;
		const char* rasterizationPtr = rasterization[x] + y*res;
		for( int z=0 ; z<res ; z++ )
		{
			if( rasterizationPtr[z] )
			{
				dist = 0;
				first = false;
				edtPtr[z] = 0;
			}
			else if( !first )
			{
				dist++;
				edtPtr[z] = dist*dist;
			}
		}
		// backward scan
		dist = 0;
		first = true;
		for( int z=(res-1) ; z>=0 ; z-- )
		{
			if( rasterizationPtr[z] )
			{
				dist = 0;
				first = false;
				edtPtr[z] = 0;
			}
			else if( !first )
			{
				dist++;
				int square = dist*dist;
				// Keep the smaller of the forward/backward distances.
				if( square<edtPtr[z] ) edtPtr[z] = square;
			}
		}
	}
	// scan along y axis: lower-envelope style minimization; s tracks the
	// index of the last improving source and prunes the inner search.
	#pragma omp parallel for num_threads( threads )
	for( int thread=0 ; thread<threads ; thread++ )
	{
		int *_oldBuffer=oldBuffer[thread] , *_newBuffer=newBuffer[thread];
		for( int xz=(res*res*thread)/threads ; xz<(res*res*(thread+1))/threads ; xz++ )
		{
			int x = xz/res , z = xz%res;
			// forward scan
			int s=0;
			int* edtPtr = edt[x] + z;
			for( int y=0 ; y<res; y++ )
			{
				_oldBuffer[y] = edtPtr[y*res];
				int dist = _oldBuffer[y];
				bool foundCloser=false;
				if( dist )
				{
					for( int t=s ; t<=y ; t++ )
					{
						int new_dist = _oldBuffer[t] + (y - t) * (y - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
				}
				if( !foundCloser ) s=y;
				_newBuffer[y] = dist;
			}
			// backward scan
			s=res-1;
			for( int y=res-1 ; y>=0 ; y-- )
			{
				int dist = _newBuffer[y];
				bool foundCloser = false;
				if( dist )
				{
					for( int t=s ; t>y ; t-- )
					{
						int new_dist = _oldBuffer[t] + (y - t) * (y - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
					// NOTE(review): write-back happens only when dist!=0; a zero
					// entry is already 0 in edt from the forward pass input.
					edtPtr[y*res] = dist;
				}
				if( !foundCloser ) s=y;
			}
		}
	}
	// scan along x axis (same scheme as the y-axis pass)
	#pragma omp parallel for num_threads( threads )
	for( int thread=0 ; thread<threads ; thread++ )
	{
		int *_oldBuffer = oldBuffer[thread] , *_newBuffer = newBuffer[thread];
		for( int yz=(res*res*thread)/threads ; yz<(res*res*(thread+1))/threads ; yz++ )
		{
			int y = yz/res , z=yz%res;
			// forward scan
			int s=0;
			int* edtPtr = edt[0] + y*res+z;
			for( int x=0 ; x<res ; x++ )
			{
				int dist = _oldBuffer[x] = edtPtr[x*res*res];
				// If the calculated distance to this point is not zero,
				// start from s and see if you can find something closer.
				bool foundCloser = false;
				if( dist )
				{
					for( int t=s ; t<=x ; t++ )
					{
						// Compute the squared distance that would be obtained if we used the (squared) orthogonal distance to t
						// plus the (squared) parallel distance from t to x
						int new_dist = _oldBuffer[t] + (x - t) * (x - t);
						// <=> new_dist = _oldBuffer[t] + x*x - 2*t*x + t*t
						// If s has not been updated then: _oldBuffer[t] + (x - t) * (x - t) > _oldBuffer[x] for all t <= x
						// Taking y = x+d (w/ d>0) we get:
						//   _oldBuffer[t] + ( y - t ) * ( y - t ) = _oldBuffer[t] + ( x - t + d ) * ( x - t + d )
						//                                         = _oldBuffer[t] + ( x - t ) * ( x - t ) + 2 * d * ( x - t ) + d * d
						//                                         > _oldBuffer[x] + ( y - x ) * ( y - x ) + 2 * d * ( x - t )
						//                                         > _oldBuffer[x] + ( y - x ) * ( y - x )
						// for all t <= x
						// That is, the squared distance through t <= x has to be at least as large as the squared distance through x
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
				}
				if( !foundCloser ) s=x;
				_newBuffer[x] = dist;
			}
			// backwards scan
			s = res-1;
			for( int x=res-1 ; x>=0 ; x-- )
			{
				int dist = _newBuffer[x];
				bool foundCloser = false;
				if( dist )
				{
					// NOTE(review): this bound is t>=x while the y-axis backward
					// scan uses t>y; the t==x probe is a no-op on the result but
					// the asymmetry looks unintentional — confirm upstream.
					for( int t=s; t>=x ; t-- )
					{
						int new_dist = _oldBuffer[t] + (x - t) * (x - t);
						if( new_dist<=dist ) dist = new_dist , s=t , foundCloser=true;
					}
					edtPtr[x*res*res] = dist;
				}
				if( !foundCloser ) s=x;
			}
		}
	}
	// Release per-thread scratch rows.
	for( int i=0 ; i<threads ; i++ )
	{
		delete[] oldBuffer[i];
		delete[] newBuffer[i];
	}
}
#endif // EDT_INCLUDED
gbdt.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_

#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>

#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>

#include "score_updater.hpp"

namespace LightGBM {

using json11::Json;

/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
 public:
  /*!
  * \brief Constructor
  */
  GBDT();
  /*!
  * \brief Destructor
  */
  ~GBDT();
  /*!
  * \brief Initialization logic
  * \param gbdt_config Config for boosting
  * \param train_data Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void Init(const Config* gbdt_config, const Dataset* train_data,
            const ObjectiveFunction* objective_function,
            const std::vector<const Metric*>& training_metrics) override;

  /*!
  * \brief Merge model from other boosting object. Will insert to the front of current boosting object
  * \param other
  */
  void MergeFrom(const Boosting* other) override {
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first (deep copies, so `other` is left intact)
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Shuffle the iterations in [start_iter, end_iter) with a fixed-seed
  // Fisher-Yates pass, keeping the num_tree_per_iteration_ trees of each
  // iteration together.
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
  * \brief Reset the training data
  * \param train_data New Training data
  * \param objective_function Training objective function
  * \param training_metrics Training metrics
  */
  void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
  * \brief Reset Boosting Config
  * \param gbdt_config Config for boosting
  */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
  * \brief Adding a validation dataset
  * \param valid_data Validation dataset
  * \param valid_metrics Metrics for validation dataset
  */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
  * \brief Perform a full training procedure
  * \param snapshot_freq frequency of snapshots
  * \param model_output_path path of model file
  */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
  * \brief Training logic
  * \param gradients nullptr for using default objective, otherwise use self-defined boosting
  * \param hessians nullptr for using default objective, otherwise use self-defined boosting
  * \return True if cannot train any more
  */
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
  * \brief Rollback one iteration
  */
  void RollbackOneIter() override;

  /*!
  * \brief Get current iteration
  */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
  * \brief Can use early stopping for prediction or not
  * \return True if cannot use early stopping for prediction
  */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
  * \brief Get evaluation result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return evaluation result
  */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
  * \brief Get current training score
  * \param out_len length of returned score
  * \return training score
  */
  const double* GetTrainingScore(int64_t* out_len) override;

  /*!
  * \brief Get size of prediction at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \return The size of prediction
  */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
  * \brief Get prediction result at data_idx data
  * \param data_idx 0: training data, 1: 1st validation data
  * \param result used to store prediction result, should allocate memory before call this function
  * \param out_len length of returned score
  */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
  * \brief Get number of prediction for one data
  * \param num_iteration number of used iterations
  * \param is_pred_leaf True if predicting leaf index
  * \param is_pred_contrib True if predicting feature contribution
  * \return number of prediction
  */
  inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    int num_preb_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      if (num_iteration > 0) {
        num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration));
      } else {
        num_preb_in_one_row *= max_iteration;
      }
    } else if (is_pred_contrib) {
      num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_preb_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;

  void PredictContrib(const double* features, double* output,
                      const PredictionEarlyStopInstance* earlyStop) const override;

  /*!
  * \brief Dump model to json format string
  * \param start_iteration The model will be saved start from
  * \param num_iteration Number of iterations that want to dump, -1 means dump all
  * \return Json format string of model
  */
  std::string DumpModel(int start_iteration, int num_iteration) const override;

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \return if-else format codes of model
  */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
  * \brief Translate model to if-else statement
  * \param num_iteration Number of iterations that want to translate, -1 means translate all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  bool SaveModelToIfElse(int num_iteration, const char* filename) const override;

  /*!
  * \brief Save model to file
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \param filename Filename that want to save to
  * \return is_finish Is training finished or not
  */
  bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override;

  /*!
  * \brief Save model to string
  * \param start_iteration The model will be saved start from
  * \param num_iterations Number of model that want to save, -1 means save all
  * \return Non-empty string if succeeded
  */
  std::string SaveModelToString(int start_iteration, int num_iterations) const override;

  /*!
  * \brief Restore from a serialized buffer
  */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
  * \brief Calculate feature importances
  * \param num_iteration Number of model that want to use for feature importance, -1 means use all
  * \param importance_type: 0 for split, 1 for gain
  * \return vector of feature_importance
  */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
  * \brief Calculate upper bound value
  * \return upper bound value
  */
  double GetUpperBoundValue() const override;

  /*!
  * \brief Calculate lower bound value
  * \return lower bound value
  */
  double GetLowerBoundValue() const override;

  /*!
  * \brief Get max feature index of this model
  * \return Max feature index of this model
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
  * \brief Get feature names of this model
  * \return Feature names of this model
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
  * \brief Get index of label column
  * \return index of label column
  */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
  * \brief Get number of weak sub-models
  * \return Number of weak sub-models
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
  * \brief Get number of tree per iteration
  * \return number of tree per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
  * \brief Get number of classes
  * \return Number of classes
  */
  inline int NumberOfClasses() const override { return num_class_; }

  // Clamp the number of iterations used for prediction and, for SHAP-style
  // contribution prediction, refresh each tree's cached max depth.
  inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
    }
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
  * \brief Get Type name of this boosting object
  */
  const char* SubModelName() const override { return "tree"; }

 protected:
  virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
    if (objective_function != nullptr) {
      return objective_function->IsConstantHessian();
    } else {
      return false;
    }
  }

  /*!
  * \brief Print eval result and check early stopping
  */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
  * \brief reset config for bagging
  */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
  * \brief Implement bagging logic
  * \param iter Current iteration
  */
  virtual void Bagging(int iter);

  virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
                                    data_size_t* buffer);

  data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
                                    data_size_t* buffer);

  /*!
  * \brief calculate the objective function
  */
  virtual void Boosting();

  /*!
  * \brief updating score after tree was trained
  * \param tree Trained tree of this iteration
  * \param cur_tree_id Current tree for multiclass training
  */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);

  /*!
  * \brief eval results for one metric
  */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;

  /*!
  * \brief Print metric result of current iteration
  * \param iter Current iteration
  * \return best_msg if met early_stopping
  */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*! \brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;
  /*! \brief First order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  bool balanced_bagging_;
  std::string loaded_parameter_;
  std::vector<int8_t> monotone_constraints_;
  // Per-block RNGs so bagging is reproducible regardless of thread count.
  const int bagging_rand_block_ = 1024;
  std::vector<Random> bagging_rands_;
  ParallelPartitionRunner<data_size_t, false> bagging_runner_;
  Json forced_splits_json_;
};

}  // namespace LightGBM
#endif  // LightGBM_BOOSTING_GBDT_H_
UI_mainConsole.h
#pragma once
#define OLC_PGE_APPLICATION
#include "olcPixelGameEngine.h"
#include "BallCollisionEngine.h"

// Interactive front-end for BallCollisionEngine using olc::PixelGameEngine:
// left-drag moves balls/line endpoints, right-drag flicks a ball.
class BallCollision : public olc::PixelGameEngine
{
public:
	BallCollision()
	{
		sAppName = "BallCollision";
	}

private:
	BallCollisionEngine* _engine;   // simulation state (owned; never deleted here)
	Ball* _selectedBall;            // ball currently under the cursor, if any
	Line* _selectedLine;            // line whose endpoint is being dragged, if any
	bool _isStartLine;              // true -> dragging (sx,sy), false -> (ex,ey)

	// True if point (x,y) lies strictly inside the circle centered at (x1,y1)
	// with radius r1 (squared-distance comparison; no sqrt needed).
	inline bool _isPointInBall(float x1, float y1, float r1, float x, float y)
	{
		return fabs((x1 - x) * (x1 - x) + (y1 - y) * (y1 - y)) < (r1 * r1);
	}

	// Mouse interaction: pick a ball/line endpoint on press, follow the cursor
	// while held, and on right-release launch the ball away from the cursor.
	inline void _selectAndDragBall()
	{
		if (GetMouse(olc::Mouse::LEFT).bPressed || GetMouse(olc::Mouse::RIGHT).bPressed)
		{
			_selectedBall = nullptr;
			for (auto& ball : _engine->Balls)
			{
				if (_isPointInBall(ball.x, ball.y, ball.r, GetMouseX(), GetMouseY()))
				{
					_selectedBall = &ball;
					break;
				}
			}
			_selectedLine = nullptr;
			// Check start endpoints first, then end endpoints (end wins on overlap).
			for (auto& line : _engine->Lines)
			{
				if (_isPointInBall(line.sx, line.sy, line.r, GetMouseX(), GetMouseY()))
				{
					_selectedLine = &line;
					_isStartLine = true;
				}
			}
			for (auto& line : _engine->Lines)
			{
				if (_isPointInBall(line.ex, line.ey, line.r, GetMouseX(), GetMouseY()))
				{
					_selectedLine = &line;
					_isStartLine = false;
				}
			}
		}
		if (GetMouse(olc::Mouse::LEFT).bHeld)
		{
			if (_selectedBall)
			{
				_selectedBall->x = GetMouseX();
				_selectedBall->y = GetMouseY();
			}
			if (_selectedLine && _isStartLine)
			{
				_selectedLine->sx = GetMouseX();
				_selectedLine->sy = GetMouseY();
			}
			if (_selectedLine && !_isStartLine)
			{
				_selectedLine->ex = GetMouseX();
				_selectedLine->ey = GetMouseY();
			}
		}
		if (GetMouse(olc::Mouse::LEFT).bReleased)
		{
			_selectedBall = nullptr;
			_selectedLine = nullptr;
		}
		if (GetMouse(olc::Mouse::RIGHT).bReleased)
		{
			if (_selectedBall)
			{
				// Velocity proportional to the drag vector (cursor -> ball).
				_selectedBall->vx = 20.0f * (_selectedBall->x - (float)GetMouseX());
				_selectedBall->vy = 20.0f * (_selectedBall->y - (float)GetMouseY());
			}
			_selectedBall = nullptr;
		}
	}

public:
	bool OnUserCreate() override
	{
		_engine = new BallCollisionEngine(ScreenWidth(), ScreenHeight(), 700);
		_engine->Create();
		return true;
	}

	bool OnUserUpdate(float fElapsedTime) override
	{
		_selectAndDragBall();
		_engine->Update(fElapsedTime);
		// clear the screen
		FillRect(0, 0, ScreenWidth(), ScreenHeight(), olc::Pixel(0, 0, 0));
		// draw ball
		// NOTE(review): parallel FillCircle calls mutate the shared draw target;
		// olc::PixelGameEngine drawing is not documented thread-safe — confirm
		// this #pragma is intentional.
#pragma omp parallel for
		for (int i = 0; i < _engine->Balls.size(); i++)
		{
			// Color by speed: faster balls shift toward red.
			int r = std::min(sqrt(_engine->Balls[i].vx * _engine->Balls[i].vx + _engine->Balls[i].vy * _engine->Balls[i].vy) + 100.0f, 255.0f);
			FillCircle(_engine->Balls[i].x, _engine->Balls[i].y, _engine->Balls[i].r, olc::Pixel(r, 255 - r, 255 - r));
		}
		// draw line: two edges offset by the unit normal times the radius,
		// capped with filled circles at the endpoints.
		for (auto& line : _engine->Lines)
		{
			float nx = -(line.ey - line.sy);
			float ny = (line.ex - line.sx);
			float d = sqrt(nx * nx + ny * ny);
			nx /= d;
			ny /= d;
			DrawLine((line.sx + nx * line.r), (line.sy + ny * line.r), (line.ex + nx * line.r), (line.ey + ny * line.r), olc::GREY);
			DrawLine((line.sx - nx * line.r), (line.sy - ny * line.r), (line.ex - nx * line.r), (line.ey - ny * line.r), olc::GREY);
			FillCircle(line.sx, line.sy, line.r, olc::GREY);
			FillCircle(line.ex, line.ey, line.r, olc::GREY);
		}
		// draw right click line
		if (_selectedBall)
		{
			DrawLine(_selectedBall->x, _selectedBall->y, GetMouseX(), GetMouseY(), olc::BLUE);
		}
		return true;
	}
};

// Construct a 1536x1024 window (1x1 pixels) and run the demo loop.
void RunBallCollision()
{
	BallCollision ball_collision;
	if (ball_collision.Construct(1536, 1024, 1, 1))
		ball_collision.Start();
}
bml_threshold_dense_typed.c
#ifdef BML_USE_MAGMA #include "magma_v2.h" #include "../bml_export.h" #include "bml_export_dense.h" #endif #include "../../typed.h" #include "../bml_allocate.h" #include "../bml_threshold.h" #include "../bml_parallel.h" #include "../bml_types.h" #include "bml_allocate_dense.h" #include "bml_threshold_dense.h" #include "bml_types_dense.h" #include "../bml_logger.h" #include <complex.h> #include <stdlib.h> #include <string.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /** Threshold a matrix. * * \ingroup threshold_group * * \param A The matrix to be thresholded * \param threshold Threshold value * \return The thresholded A */ bml_matrix_dense_t *TYPED_FUNC( bml_threshold_new_dense) ( bml_matrix_dense_t * A, double threshold) { #ifdef BML_USE_MAGMA LOG_ERROR ("bml_threshold_new_dense() not implemented for MAGMA matrices\n"); #endif int N = A->N; bml_matrix_dimension_t matrix_dimension = { A->N, A->N, A->N }; bml_matrix_dense_t *B = TYPED_FUNC(bml_zero_matrix_dense) (matrix_dimension, A->distribution_mode); REAL_T *A_matrix = A->matrix; REAL_T *B_matrix = B->matrix; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; int myRank = bml_getMyRank(); #pragma omp parallel for \ shared(N, A_matrix, B_matrix) \ shared(A_localRowMin, A_localRowMax, myRank) //for (int i = 0; i < N * N; i++) for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N; i++) { if (is_above_threshold(A_matrix[i], (REAL_T) threshold)) { B_matrix[i] = A_matrix[i]; } } return B; } /** Threshold a matrix in place. 
* * \ingroup threshold_group * * \param A The matrix to be thresholded * \param threshold Threshold value * \return The thresholded A */ void TYPED_FUNC( bml_threshold_dense) ( bml_matrix_dense_t * A_bml, double threshold) { int N = A_bml->N; #ifdef BML_USE_MAGMA REAL_T *A_matrix = bml_export_to_dense(A_bml, dense_row_major); #else REAL_T *A_matrix = A_bml->matrix; #endif int *A_localRowMin = A_bml->domain->localRowMin; int *A_localRowMax = A_bml->domain->localRowMax; int myRank = bml_getMyRank(); #pragma omp parallel for \ shared(N, A_matrix) \ shared(A_localRowMin, A_localRowMax, myRank) //for (int i = 0; i < N * N; i++) for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N; i++) { if (!is_above_threshold(A_matrix[i], (REAL_T) threshold)) { A_matrix[i] = (REAL_T) 0.0; } } #ifdef BML_USE_MAGMA MAGMA(setmatrix) (N, N, (MAGMA_T *) A_matrix, N, A_bml->matrix, A_bml->ld, bml_queue()); #endif }
krb5-18_fmt_plug.c
/*
 * KRB5 - Enctype 18 (aes256-cts-hmac-sha1-96) cracker patch for JtR
 * Created on August of 2012 by Mougey Camille (CEA/DAM) & Lalet Pierre (CEA/DAM)
 *
 * This format is one of formats saved in KDC database and used during the authentication part
 *
 * This software is Copyright (c) 2012, Mougey Camille (CEA/DAM)
 * Lalet Pierre (CEA/DAM)
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format :
 * - user:$krb18$REALMname$hash
 * - user:REALMname$hash
 *
 * Format rewritten Dec, 2014, without use of -lkrb5, by JimF. Now we use 'native' JtR
 * pbkdf2-hmac-sha1() and simple call to 2 AES limb encrypt for entire process. Very
 * simple, and 10x faster, and no obscure -lkrb5 dependency
 */
#if AC_BUILT
#include "autoconfig.h"
#endif

#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5_18;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5_18);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"

#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32
#endif
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "krb5-18"
#define FORMAT_NAME             "Kerberos 5 db etype 18"
#define FORMAT_TAG              "$krb18$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG)-1)
#if SIMD_COEF_32
#define ALGORITHM_NAME          "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " AES"
#else
#define ALGORITHM_NAME          "PBKDF2-SHA1 32/" ARCH_BITS_STR " AES"
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        64
#define CIPHERTEXT_LENGTH       64
#define BINARY_SIZE             32
#define BINARY_ALIGN            4
#define SALT_SIZE               CIPHERTEXT_LENGTH
#define SALT_ALIGN              1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* Self-test vectors: with and without the "$krb18$" tag. */
static struct fmt_tests kinit_tests[] = {
	{"OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
	{FORMAT_TAG "OLYMPE.OLtest$214bb89cf5b8330112d52189ab05d9d05b03b5a961fe6d06203335ad5f339b26", "password"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords */
static char saved_salt[SALT_SIZE+1];            /* current salt (REALM+principal) */
static uint32_t (*crypt_out)[16];               /* derived key material per candidate */

/* Allocate per-candidate buffers, scaled for OpenMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept "[$krb18$]salt$hex64": salt before the '$', 64 hex digits after.
 * NOTE(review): the salt-length check measures from the start of the
 * ciphertext including the tag when present, so tagged hashes allow a
 * slightly shorter salt than untagged ones — confirm intent upstream. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strstr(p, "$");
	if (p == NULL)
		return 0;
	q = ciphertext;
	if (p - q > SALT_SIZE) /* check salt length */
		return 0;
	q = ++p;
	while (atoi16l[ARCH_INDEX(*q)] != 0x7F) {
		q++;
	}
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: ensure the "$krb18$" tag is present. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + SALT_SIZE + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return ciphertext;
	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpyn(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + SALT_SIZE + 1);
	return out;
}

/* Extract the salt substring (everything between the tag and the '$'). */
static void *get_salt(char *ciphertext)
{
	static char out[SALT_SIZE+1];
	char *p, *q;

	memset(&out, 0, sizeof(out));
	p = ciphertext + TAG_LENGTH;
	q = strstr(p, "$");
	strncpy(out, p, q-p);
	out[q-p] = 0; /* explicit termination; strncpy alone does not guarantee it */
	return out;
}

static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}

/* Decode the 64 hex digits after the '$' into 32 binary bytes. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i = 0;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	p = ciphertext;
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	p = strstr(p, "$") + 1;
	for (; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Core: key = PBKDF2-SHA1(password, salt, 4096, 32); then two AES-256-ECB
 * encryptions of the RFC 3962 "kerberos" constant (DK with constant) give
 * the 32-byte stored key.  SIMD path derives SSE_GROUP_SZ_SHA1 keys at once. */
static int crypt_all(int *pcount, struct db_salt *_salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[32], i;
		AES_KEY aeskey;
#ifdef SSE_GROUP_SZ_SHA1
		uint32_t Key[SSE_GROUP_SZ_SHA1][32/4];
		int lens[SSE_GROUP_SZ_SHA1];
		unsigned char *pin[SSE_GROUP_SZ_SHA1];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA1];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = Key[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
		                (const unsigned char*)saved_salt, strlen(saved_salt),
		                4096, &(x.poutc), 32, 0);
#else
		pbkdf2_sha1((const unsigned char*)saved_key[index],
		            strlen(saved_key[index]),
		            (const unsigned char*)saved_salt, strlen(saved_salt),
		            4096, key, 32, 0);
#endif
		i=0;
#ifdef SSE_GROUP_SZ_SHA1
		for (; i < SSE_GROUP_SZ_SHA1; ++i) {
		memcpy(key, Key[i], 32);
#endif
		AES_set_encrypt_key(key, 256, &aeskey);
		AES_encrypt((unsigned char*)"kerberos{\x9b[+\x93\x13+\x93",
		            (unsigned char*)(crypt_out[index+i]), &aeskey);
		AES_encrypt((unsigned char*)(crypt_out[index+i]),
		            (unsigned char*)&crypt_out[index+i][4], &aeskey);
#ifdef SSE_GROUP_SZ_SHA1
		}
#endif
	}
	return count;
}

/* Quick reject: compare only the first 32 bits of each derived key. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (crypt_out[index][0] == *(uint32_t*)binary)
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a truncated copy of the candidate password. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash-table bucket indices from the first derived-key word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Format descriptor registered with the JtR core. */
struct fmt_main fmt_krb5_18 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		kinit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact,
	}
};

#endif /* plugin stanza */
owl_slicing_basic_impl_omp.h
/*
 * OWL - OCaml Scientific and Engineering Computing
 * Copyright (c) 2016-2020 Liang Wang <liang.wang@cl.cam.ac.uk>
 */

#ifdef OWL_ENABLE_TEMPLATE

/* Template header: TYPE, FUNCTION and MAPFUN are supplied by the including
 * translation unit.  Each slice_N routine handles the case where only the
 * last N dimensions remain to be copied, applying MAPFUN(x-elem, y-elem)
 * over the strided index sets described in slice_pair p. */

// Level 1 optimisation
/* Innermost case: copy the last dimension as one strided 1-D walk. */
void FUNCTION (c, slice_1) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;

  int64_t d = p->dim - 1;
  int64_t n = p->n[d];
  int64_t posx = p->posx + p->ofsx[d];
  int64_t posy = p->posy + p->ofsy[d];
  int64_t incx = p->incx[d];
  int64_t incy = p->incy[d];

  for (int64_t i = 0; i < n; i++) {
    MAPFUN (*(x + posx), *(y + posy));
    posx += incx;
    posy += incy;
  }
}

// Level 2 optimisation
/* Last two dimensions; the outer loop is parallelised with OpenMP.
 * posx1/posy1 are derived from i0, so iterations are independent. */
void FUNCTION (c, slice_2) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;

  int64_t d0 = p->dim - 2;
  int64_t d1 = p->dim - 1;
  int64_t n0 = p->n[d0];
  int64_t n1 = p->n[d1];

  int64_t ofsx0 = p->ofsx[d0];
  int64_t ofsy0 = p->ofsy[d0];
  int64_t incx0 = p->incx[d0];
  int64_t incy0 = p->incy[d0];

  int64_t ofsx1 = p->ofsx[d1];
  int64_t ofsy1 = p->ofsy[d1];
  int64_t incx1 = p->incx[d1];
  int64_t incy1 = p->incy[d1];

  int64_t posx0 = p->posx + ofsx0;
  int64_t posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int64_t i0 = 0; i0 < n0; i0++) {
    int64_t posx1 = posx0 + ofsx1 + i0 * incx0;
    int64_t posy1 = posy0 + ofsy1 + i0 * incy0;

    for (int64_t i1 = 0; i1 < n1; i1++) {
      MAPFUN (*(x + posx1), *(y + posy1));
      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

// Level 3 optimisation
/* Last three dimensions; outermost loop OpenMP-parallel, inner two serial. */
void FUNCTION (c, slice_3) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;

  int64_t d0 = p->dim - 3;
  int64_t d1 = p->dim - 2;
  int64_t d2 = p->dim - 1;
  int64_t n0 = p->n[d0];
  int64_t n1 = p->n[d1];
  int64_t n2 = p->n[d2];

  int64_t ofsx0 = p->ofsx[d0];
  int64_t ofsy0 = p->ofsy[d0];
  int64_t incx0 = p->incx[d0];
  int64_t incy0 = p->incy[d0];

  int64_t ofsx1 = p->ofsx[d1];
  int64_t ofsy1 = p->ofsy[d1];
  int64_t incx1 = p->incx[d1];
  int64_t incy1 = p->incy[d1];

  int64_t ofsx2 = p->ofsx[d2];
  int64_t ofsy2 = p->ofsy[d2];
  int64_t incx2 = p->incx[d2];
  int64_t incy2 = p->incy[d2];

  int64_t posx0 = p->posx + ofsx0;
  int64_t posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int64_t i0 = 0; i0 < n0; i0++) {
    int64_t posx1 = posx0 + ofsx1 + i0 * incx0;
    int64_t posy1 = posy0 + ofsy1 + i0 * incy0;

    for (int64_t i1 = 0; i1 < n1; i1++) {
      int64_t posx2 = posx1 + ofsx2;
      int64_t posy2 = posy1 + ofsy2;

      for (int64_t i2 = 0; i2 < n2; i2++) {
        MAPFUN (*(x + posx2), *(y + posy2));
        posx2 += incx2;
        posy2 += incy2;
      }

      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

// Level 4 optimisation
/* Last four dimensions; same pattern one level deeper. */
void FUNCTION (c, slice_4) (struct slice_pair *p) {
  TYPE *x = (TYPE *) p->x;
  TYPE *y = (TYPE *) p->y;

  int64_t d0 = p->dim - 4;
  int64_t d1 = p->dim - 3;
  int64_t d2 = p->dim - 2;
  int64_t d3 = p->dim - 1;
  int64_t n0 = p->n[d0];
  int64_t n1 = p->n[d1];
  int64_t n2 = p->n[d2];
  int64_t n3 = p->n[d3];

  int64_t ofsx0 = p->ofsx[d0];
  int64_t ofsy0 = p->ofsy[d0];
  int64_t incx0 = p->incx[d0];
  int64_t incy0 = p->incy[d0];

  int64_t ofsx1 = p->ofsx[d1];
  int64_t ofsy1 = p->ofsy[d1];
  int64_t incx1 = p->incx[d1];
  int64_t incy1 = p->incy[d1];

  int64_t ofsx2 = p->ofsx[d2];
  int64_t ofsy2 = p->ofsy[d2];
  int64_t incx2 = p->incx[d2];
  int64_t incy2 = p->incy[d2];

  int64_t ofsx3 = p->ofsx[d3];
  int64_t ofsy3 = p->ofsy[d3];
  int64_t incx3 = p->incx[d3];
  int64_t incy3 = p->incy[d3];

  int64_t posx0 = p->posx + ofsx0;
  int64_t posy0 = p->posy + ofsy0;

  #pragma omp parallel for schedule(static)
  for (int64_t i0 = 0; i0 < n0; i0++) {
    int64_t posx1 = posx0 + ofsx1 + i0 * incx0;
    int64_t posy1 = posy0 + ofsy1 + i0 * incy0;

    for (int64_t i1 = 0; i1 < n1; i1++) {
      int64_t posx2 = posx1 + ofsx2;
      int64_t posy2 = posy1 + ofsy2;

      for (int64_t i2 = 0; i2 < n2; i2++) {
        int64_t posx3 = posx2 + ofsx3;
        int64_t posy3 = posy2 + ofsy3;

        for (int64_t i3 = 0; i3 < n3; i3++) {
          MAPFUN (*(x + posx3), *(y + posy3));
          posx3 += incx3;
          posy3 += incy3;
        }

        posx2 += incx2;
        posy2 += incy2;
      }

      posx1 += incx1;
      posy1 += incy1;
    }
  }
}

// slice x based on the basic slice definition and save to y.
/* Recursive driver: walks dimension p->dep outward, dispatching to the
 * unrolled slice_1..slice_4 kernels once at most 4 dimensions remain.
 * posx/posy are saved and restored so the caller's state is unchanged. */
void FUNCTION (c, slice) (struct slice_pair *p) {
  if (p->dep == p->dim - 1)
    FUNCTION (c, slice_1) (p);
  else if (p->dep == p->dim - 2)
    FUNCTION (c, slice_2) (p);
  else if (p->dep == p->dim - 3)
    FUNCTION (c, slice_3) (p);
  else if (p->dep == p->dim - 4)
    FUNCTION (c, slice_4) (p);
  else {
    const int64_t d = p->dep;
    const int64_t n = p->n[d];
    const int64_t incx = p->incx[d];
    const int64_t incy = p->incy[d];
    const int64_t save_posx = p->posx;
    const int64_t save_posy = p->posy;
    p->posx += p->ofsx[d];
    p->posy += p->ofsy[d];

    for (int64_t i = 0; i < n; i++) {
      p->dep += 1;
      FUNCTION (c, slice) (p);
      p->dep -= 1;
      p->posx += incx;
      p->posy += incy;
    }

    p->posx = save_posx;
    p->posy = save_posy;
  }
}

// stub function
/* OCaml entry point: vX source bigarray, vY destination, vZ the slice
 * definition (int64 triplets).  Builds a slice_pair, fills offsets and
 * strides via the c_slicing_* helpers, runs the copy, then frees the
 * temporaries.  NOTE(review): the calloc results are not checked, and
 * ofsy stays all-zero (destination offsets are implicit) — confirm. */
CAMLprim value FUNCTION (stub, slice) (value vX, value vY, value vZ) {
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  TYPE *X_data = (TYPE *) X->data;

  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  TYPE *Y_data = (TYPE *) Y->data;

  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  int64_t *slice = (int64_t *) Z->data;

  struct slice_pair * sp = calloc(1, sizeof(struct slice_pair));
  sp->dim = X->num_dims;
  sp->dep = 0;
  sp->n = Y->dim;
  sp->x = X_data;
  sp->y = Y_data;
  sp->posx = 0;
  sp->posy = 0;
  sp->ofsx = calloc(sp->dim, sizeof(int64_t));
  sp->ofsy = calloc(sp->dim, sizeof(int64_t));
  sp->incx = calloc(sp->dim, sizeof(int64_t));
  sp->incy = calloc(sp->dim, sizeof(int64_t));

  c_slicing_offset(X, slice, sp->ofsx);
  c_slicing_stride(X, slice, sp->incx);
  c_ndarray_stride(Y, sp->incy);

  FUNCTION (c, slice) (sp);

  free(sp->ofsx);
  free(sp->ofsy);
  free(sp->incx);
  free(sp->incy);
  free(sp);

  return Val_unit;
}

#endif /* OWL_ENABLE_TEMPLATE */
kernel_cpu.c
#ifdef __cplusplus
extern "C" {
#endif

// LIBRARIES
//#include <omp.h>					// (in path known to compiler) needed by openmp
#include <stdlib.h>					// (in path known to compiler) needed by malloc
#include <stdio.h>					// (in path known to compiler) needed by printf
#include <math.h>					// (in path known to compiler) needed by exp

// MAIN FUNCTION HEADER
#include "./../main.h"				// (in the main program folder) needed to recognized input variables

// UTILITIES
#include "./../util/timer/timer.h"	// (in library path specified to compiler) needed by timer

// KERNEL_CPU FUNCTION HEADER
#include "kernel_cpu.h"				// (in the current directory)

// PLASMAKERNEL_GPU
/*
 * CPU reference kernel: for each box l, every particle i in the home box
 * interacts with every particle j in the home box and each of its
 * neighbor boxes, accumulating a potential-like term (.v) and force
 * components (.x/.y/.z) into fv.  rv holds particle positions/values, qv
 * holds per-particle charges.  Serial version — the OpenMP pragma below
 * is intentionally commented out.
 */
void  kernel_cpu(	par_str par,
					dim_str dim,
					box_str* box,
					FOUR_VECTOR* rv,
					fp* qv,
					FOUR_VECTOR* fv)
{

	// Variables
	// parameters
	fp alpha;
	fp a2;				// precomputed 2*alpha^2, reused for every pair

	// counters
	int i, j, k, l;

	// home box
	long first_i;
	FOUR_VECTOR* rA;	// positions of home-box particles
	FOUR_VECTOR* fA;	// force accumulators of home-box particles

	// neighbor box
	int pointer;
	long first_j;
	FOUR_VECTOR* rB;	// positions of (home or neighbor) box particles
	fp* qB;				// charges of (home or neighbor) box particles

	// common
	fp r2;
	fp u2;
	fp fs;
	fp vij;
	fp fxij,fyij,fzij;
	THREE_VECTOR d;

	// MCPU SETUP
	//omp_set_num_threads(dim.cores_arg);

	// INPUTS
	alpha = par.alpha;
	a2 = 2.0*alpha*alpha;

	// PROCESS INTERACTIONS
	/*	#pragma omp	parallel for \
		private(i, j, k) \
		private(first_i, rA, fA) \
		private(pointer, first_j, rB, qB) \
		private(r2, u2, fs, vij, fxij, fyij, fzij, d)
	*/
	for(l=0; l<dim.number_boxes; l=l+1){

		// home box - box parameters
		first_i = box[l].offset;												// offset to common arrays

		// home box - distance, force, charge and type parameters from common arrays
		rA = &rv[first_i];
		fA = &fv[first_i];

		// Do for the # of (home+neighbor) boxes
		for (k=0; k<(1+box[l].nn); k++)
		{

			// neighbor box - get pointer to the right box
			if(k==0){
				pointer = l;													// set first box to be processed to home box
			}
			else{
				pointer = box[l].nei[k-1].number;								// remaining boxes are neighbor boxes
			}

			// neighbor box - box parameters
			first_j = box[pointer].offset;

			// neighbor box - distance, force, charge and type parameters
			rB = &rv[first_j];
			qB = &qv[first_j];

			// Do for the # of particles in home box
			for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){

				// do for the # of particles in current (home or neighbor) box
				for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){

					// coefficients
					// r2 is a pairwise distance-like term built from the
					// .v components and the DOT of the two positions
					r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]);
					u2 = a2*r2;
					vij= exp(-u2);												// Gaussian-type pair weight
					fs = 2.*vij;
					d.x = rA[i].x  - rB[j].x;
					d.y = rA[i].y  - rB[j].y;
					d.z = rA[i].z  - rB[j].z;
					fxij=fs*d.x;
					fyij=fs*d.y;
					fzij=fs*d.z;

					// forces, scaled by the neighbor particle's charge
					fA[i].v +=  qB[j]*vij;
					fA[i].x +=  qB[j]*fxij;
					fA[i].y +=  qB[j]*fyij;
					fA[i].z +=  qB[j]*fzij;

				} // for j

			} // for i

		} // for k

	} // for l

	/*	debug dump of the last home box's accumulators
	for (i = 0; i < NUMBER_PAR_PER_BOX; ++i)
	{
		fprintf(stderr, "%f %f %f %f\n", fA[i].v, fA[i].x, fA[i].y, fA[i].z);
	}
	*/

} // main

#ifdef __cplusplus
}
#endif
convolution_3x3_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #if __ARM_NEON && __aarch64__ Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator); #else Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); #endif const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? 
vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f); { float* ptr = (float*)out0; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias0); vst1q_f32(ptr + 12, _bias0); vst1q_f32(ptr + 16, _bias1); vst1q_f32(ptr + 20, _bias1); vst1q_f32(ptr + 24, _bias1); vst1q_f32(ptr + 28, _bias1); ptr += 32; } for (; j + 1 < outw; j += 2) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias1); vst1q_f32(ptr + 12, _bias1); ptr += 16; } for (; j < outw; j++) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias1); ptr += 8; } } } const unsigned short* k0 = kernel.channel(p); const unsigned short* k1 = kernel.channel(p + 1); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = 
vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "ld1 {v1.s}[0], [%1] \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "fmla v28.4s, %17.4s, v0.s[0] \n" "fmla v29.4s, %17.4s, v0.s[1] \n" "fmla v30.4s, %17.4s, v0.s[2] \n" "fmla v31.4s, %17.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "ld1 {v3.s}[0], [%2] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "fmla v28.4s, %18.4s, v0.s[1] \n" "fmla v29.4s, %18.4s, v0.s[2] \n" "fmla v30.4s, %18.4s, v0.s[3] \n" "fmla v31.4s, %18.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v28.4s, %19.4s, v0.s[2] \n" "fmla v29.4s, %19.4s, v0.s[3] \n" "fmla v30.4s, %19.4s, v1.s[0] \n" "fmla v31.4s, %19.4s, v1.s[1] \n" "fmla v24.4s, %11.4s, v2.s[0] \n" "fmla v25.4s, %11.4s, v2.s[1] \n" "fmla v26.4s, %11.4s, v2.s[2] \n" "fmla v27.4s, %11.4s, v2.s[3] \n" "fmla v28.4s, %20.4s, v2.s[0] \n" "fmla v29.4s, %20.4s, v2.s[1] \n" "fmla v30.4s, %20.4s, v2.s[2] \n" "fmla v31.4s, %20.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "ld1 {v1.s}[0], [%3] \n" "fmla v24.4s, %12.4s, v2.s[1] \n" "fmla v25.4s, %12.4s, v2.s[2] \n" "fmla v26.4s, %12.4s, v2.s[3] \n" "fmla v27.4s, %12.4s, v3.s[0] \n" "fmla v28.4s, %21.4s, v2.s[1] \n" "fmla v29.4s, %21.4s, v2.s[2] \n" "fmla v30.4s, %21.4s, v2.s[3] \n" "fmla v31.4s, %21.4s, v3.s[0] 
\n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %13.4s, v2.s[2] \n" "fmla v25.4s, %13.4s, v2.s[3] \n" "fmla v26.4s, %13.4s, v3.s[0] \n" "fmla v27.4s, %13.4s, v3.s[1] \n" "fmla v28.4s, %22.4s, v2.s[2] \n" "fmla v29.4s, %22.4s, v2.s[3] \n" "fmla v30.4s, %22.4s, v3.s[0] \n" "fmla v31.4s, %22.4s, v3.s[1] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v28.4s, %23.4s, v0.s[0] \n" "fmla v29.4s, %23.4s, v0.s[1] \n" "fmla v30.4s, %23.4s, v0.s[2] \n" "fmla v31.4s, %23.4s, v0.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v28.4s, %24.4s, v0.s[1] \n" "fmla v29.4s, %24.4s, v0.s[2] \n" "fmla v30.4s, %24.4s, v0.s[3] \n" "fmla v31.4s, %24.4s, v1.s[0] \n" "sub %0, %0, #64 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "fmla v28.4s, %25.4s, v0.s[2] \n" "fmla v29.4s, %25.4s, v0.s[3] \n" "fmla v30.4s, %25.4s, v1.s[0] \n" "fmla v31.4s, %25.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, 
v26.4s, v27.4s}, [%0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %17.4s, v0.s[0] \n" "fmla v27.4s, %17.4s, v0.s[1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %18.4s, v0.s[1] \n" "fmla v27.4s, %18.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %19.4s, v0.s[2] \n" "fmla v27.4s, %19.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v1.s[0] \n" "fmla v25.4s, %11.4s, v1.s[1] \n" "fmla v26.4s, %20.4s, v1.s[0] \n" "fmla v27.4s, %20.4s, v1.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "fmla v24.4s, %12.4s, v1.s[1] \n" "fmla v25.4s, %12.4s, v1.s[2] \n" "fmla v26.4s, %21.4s, v1.s[1] \n" "fmla v27.4s, %21.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %13.4s, v1.s[2] \n" "fmla v25.4s, %13.4s, v1.s[3] \n" "fmla v26.4s, %22.4s, v1.s[2] \n" "fmla v27.4s, %22.4s, v1.s[3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %23.4s, v0.s[0] \n" "fmla v27.4s, %23.4s, v0.s[1] \n" "add %1, %1, #4 \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %24.4s, v0.s[1] \n" "fmla v27.4s, %24.4s, v0.s[2] \n" "add %2, %2, #4 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %25.4s, v0.s[2] \n" "fmla v27.4s, %25.4s, v0.s[3] \n" "add %3, %3, #4 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // 
%24 "w"(_k22_1) // %25 : "memory", "v0", "v1", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { float32x4_t _sum00 = vld1q_f32(outptr0); float32x4_t _sum10 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum10); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; k1 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); unsigned short* outptr1_bf16 = top_blob.channel(p + 1); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); 
float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "ld1 {v1.s}[0], [%3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" "fmla v24.4s, %12.4s, v0.s[0] \n" "fmla v25.4s, %12.4s, v0.s[1] \n" "fmla v26.4s, %12.4s, v0.s[2] \n" "fmla v27.4s, %12.4s, v0.s[3] \n" "fmla v28.4s, %21.4s, v0.s[0] \n" "fmla v29.4s, %21.4s, v0.s[1] \n" "fmla v30.4s, %21.4s, v0.s[2] \n" "fmla v31.4s, %21.4s, v0.s[3] \n" "fmla v24.4s, %13.4s, v0.s[1] \n" "fmla v25.4s, %13.4s, v0.s[2] \n" "fmla v26.4s, %13.4s, v0.s[3] \n" "fmla v27.4s, %13.4s, v1.s[0] \n" "fmla v28.4s, %22.4s, v0.s[1] \n" "fmla v29.4s, %22.4s, v0.s[2] \n" "fmla v30.4s, %22.4s, v0.s[3] \n" "fmla v31.4s, %22.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v2.4h}, [%4], #8 \n" "ld1 {v3.s}[0], [%4] \n" "fmla v24.4s, %14.4s, v0.s[2] \n" "fmla v25.4s, %14.4s, v0.s[3] \n" "fmla v26.4s, %14.4s, v1.s[0] \n" "fmla v27.4s, %14.4s, v1.s[1] \n" "fmla v28.4s, %23.4s, v0.s[2] \n" "fmla v29.4s, %23.4s, v0.s[3] \n" "fmla 
v30.4s, %23.4s, v1.s[0] \n" "fmla v31.4s, %23.4s, v1.s[1] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %15.4s, v2.s[0] \n" "fmla v25.4s, %15.4s, v2.s[1] \n" "fmla v26.4s, %15.4s, v2.s[2] \n" "fmla v27.4s, %15.4s, v2.s[3] \n" "fmla v28.4s, %24.4s, v2.s[0] \n" "fmla v29.4s, %24.4s, v2.s[1] \n" "fmla v30.4s, %24.4s, v2.s[2] \n" "fmla v31.4s, %24.4s, v2.s[3] \n" "fmla v24.4s, %16.4s, v2.s[1] \n" "fmla v25.4s, %16.4s, v2.s[2] \n" "fmla v26.4s, %16.4s, v2.s[3] \n" "fmla v27.4s, %16.4s, v3.s[0] \n" "fmla v28.4s, %25.4s, v2.s[1] \n" "fmla v29.4s, %25.4s, v2.s[2] \n" "fmla v30.4s, %25.4s, v2.s[3] \n" "fmla v31.4s, %25.4s, v3.s[0] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "ld1 {v1.s}[0], [%5] \n" "fmla v24.4s, %17.4s, v2.s[2] \n" "fmla v25.4s, %17.4s, v2.s[3] \n" "fmla v26.4s, %17.4s, v3.s[0] \n" "fmla v27.4s, %17.4s, v3.s[1] \n" "fmla v28.4s, %26.4s, v2.s[2] \n" "fmla v29.4s, %26.4s, v2.s[3] \n" "fmla v30.4s, %26.4s, v3.s[0] \n" "fmla v31.4s, %26.4s, v3.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %18.4s, v0.s[0] \n" "fmla v25.4s, %18.4s, v0.s[1] \n" "fmla v26.4s, %18.4s, v0.s[2] \n" "fmla v27.4s, %18.4s, v0.s[3] \n" "fmla v28.4s, %27.4s, v0.s[0] \n" "fmla v29.4s, %27.4s, v0.s[1] \n" "fmla v30.4s, %27.4s, v0.s[2] \n" "fmla v31.4s, %27.4s, v0.s[3] \n" "fmla v24.4s, %19.4s, v0.s[1] \n" "fmla v25.4s, %19.4s, v0.s[2] \n" "fmla v26.4s, %19.4s, v0.s[3] \n" "fmla v27.4s, %19.4s, v1.s[0] \n" "fmla v28.4s, %28.4s, v0.s[1] \n" "fmla v29.4s, %28.4s, v0.s[2] \n" "fmla v30.4s, %28.4s, v0.s[3] \n" "fmla v31.4s, %28.4s, v1.s[0] \n" "fmla v24.4s, %20.4s, v0.s[2] \n" "fmla v25.4s, %20.4s, v0.s[3] \n" "fmla v26.4s, %20.4s, v1.s[0] \n" "fmla v27.4s, %20.4s, v1.s[1] \n" "fmla v28.4s, %29.4s, v0.s[2] \n" "fmla v29.4s, %29.4s, v0.s[3] \n" "fmla v30.4s, %29.4s, v1.s[0] \n" "fmla v31.4s, %29.4s, v1.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, 
v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %12.4s, v0.s[0] \n" "fmla v25.4s, %12.4s, v0.s[1] \n" "fmla v26.4s, %21.4s, v0.s[0] \n" "fmla v27.4s, %21.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v1.4h}, [%4] \n" "fmla v24.4s, %13.4s, v0.s[1] \n" "fmla v25.4s, %13.4s, v0.s[2] \n" "fmla v26.4s, %22.4s, v0.s[1] \n" "fmla v27.4s, %22.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %14.4s, v0.s[2] \n" "fmla v25.4s, %14.4s, v0.s[3] \n" "fmla v26.4s, %23.4s, v0.s[2] \n" "fmla v27.4s, %23.4s, v0.s[3] \n" "fmla v24.4s, %15.4s, v1.s[0] \n" "fmla v25.4s, %15.4s, v1.s[1] \n" "fmla v26.4s, %24.4s, v1.s[0] \n" "fmla v27.4s, %24.4s, v1.s[1] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5] \n" "fmla v24.4s, %16.4s, v1.s[1] \n" "fmla v25.4s, %16.4s, v1.s[2] \n" "fmla v26.4s, %25.4s, v1.s[1] \n" "fmla v27.4s, %25.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %17.4s, v1.s[2] \n" "fmla 
v25.4s, %17.4s, v1.s[3] \n" "fmla v26.4s, %26.4s, v1.s[2] \n" "fmla v27.4s, %26.4s, v1.s[3] \n" "fmla v24.4s, %18.4s, v0.s[0] \n" "fmla v25.4s, %18.4s, v0.s[1] \n" "fmla v26.4s, %27.4s, v0.s[0] \n" "fmla v27.4s, %27.4s, v0.s[1] \n" "fmla v24.4s, %19.4s, v0.s[1] \n" "fmla v25.4s, %19.4s, v0.s[2] \n" "fmla v26.4s, %28.4s, v0.s[1] \n" "fmla v27.4s, %28.4s, v0.s[2] \n" "add %3, %3, #4 \n" "fmla v24.4s, %20.4s, v0.s[2] \n" "fmla v25.4s, %20.4s, v0.s[3] \n" "fmla v26.4s, %29.4s, v0.s[2] \n" "fmla v27.4s, %29.4s, v0.s[3] \n" "add %4, %4, #4 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "add %5, %5, #4 \n" "st1 {v24.4h, v25.4h}, [%0], #16 \n" "st1 {v26.4h, v27.4h}, [%1], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "memory", "v0", "v1", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { float32x4_t _sum00 = vld1q_f32(outptr0); float32x4_t _sum10 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0); _sum00 = 
vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2); vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum00)); vst1_u16(outptr1_bf16, vcvt_bf16_f32(_sum10)); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; outptr0_bf16 += 4; outptr1_bf16 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const unsigned short* k0 = kernel.channel(p); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<unsigned short>(0); const unsigned short* r1 = img0.row<unsigned short>(1); const unsigned short* r2 = img0.row<unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%0, 
#512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" // "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "ld1 {v2.s}[0], [%1] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "fmla v28.4s, %8.4s, v1.s[0] \n" "fmla v29.4s, %8.4s, v1.s[1] \n" "fmla v30.4s, %8.4s, v1.s[2] \n" "fmla v31.4s, %8.4s, v1.s[3] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "fmla v28.4s, %9.4s, v1.s[1] \n" "fmla v29.4s, %9.4s, v1.s[2] \n" "fmla v30.4s, %9.4s, v1.s[3] \n" "fmla v31.4s, %9.4s, v2.s[0] \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v28.4s, %10.4s, v1.s[2] \n" "fmla v29.4s, %10.4s, v1.s[3] \n" "fmla v30.4s, %10.4s, v2.s[0] \n" "fmla v31.4s, %10.4s, v2.s[1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.4h, v5.4h}, [%2], #16 \n" "ld1 {v2.s}[0], [%2] \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %11.4s, v4.s[0] \n" "fmla v25.4s, %11.4s, v4.s[1] \n" "fmla v26.4s, %11.4s, v4.s[2] \n" "fmla v27.4s, %11.4s, v4.s[3] \n" "fmla v28.4s, %11.4s, v5.s[0] \n" "fmla v29.4s, %11.4s, v5.s[1] \n" "fmla v30.4s, %11.4s, v5.s[2] \n" "fmla v31.4s, %11.4s, v5.s[3] \n" "fmla v24.4s, %12.4s, v4.s[1] \n" "fmla v25.4s, %12.4s, v4.s[2] \n" "fmla v26.4s, %12.4s, v4.s[3] \n" "fmla v27.4s, %12.4s, v5.s[0] \n" "fmla v28.4s, %12.4s, v5.s[1] \n" "fmla v29.4s, %12.4s, v5.s[2] \n" "fmla v30.4s, %12.4s, v5.s[3] \n" "fmla v31.4s, %12.4s, v2.s[0] \n" "fmla v24.4s, %13.4s, v4.s[2] \n" "fmla v25.4s, %13.4s, v4.s[3] \n" "fmla v26.4s, %13.4s, v5.s[0] \n" "fmla v27.4s, %13.4s, v5.s[1] \n" "fmla v28.4s, %13.4s, 
v5.s[2] \n" "fmla v29.4s, %13.4s, v5.s[3] \n" "fmla v30.4s, %13.4s, v2.s[0] \n" "fmla v31.4s, %13.4s, v2.s[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "ld1 {v2.s}[0], [%3] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v28.4s, %14.4s, v1.s[0] \n" "fmla v29.4s, %14.4s, v1.s[1] \n" "fmla v30.4s, %14.4s, v1.s[2] \n" "fmla v31.4s, %14.4s, v1.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v28.4s, %15.4s, v1.s[1] \n" "fmla v29.4s, %15.4s, v1.s[2] \n" "fmla v30.4s, %15.4s, v1.s[3] \n" "fmla v31.4s, %15.4s, v2.s[0] \n" "sub %0, %0, #64 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "fmla v28.4s, %16.4s, v1.s[2] \n" "fmla v29.4s, %16.4s, v1.s[3] \n" "fmla v30.4s, %16.4s, v2.s[0] \n" "fmla v31.4s, %16.4s, v2.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n" "shll v0.4s, v0.4h, #16 \n" "ld1 {v1.s}[0], [%1] \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, 
v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "ld1 {v3.s}[0], [%2] \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v24.4s, %11.4s, v2.s[0] \n" "fmla v25.4s, %11.4s, v2.s[1] \n" "fmla v26.4s, %11.4s, v2.s[2] \n" "fmla v27.4s, %11.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %12.4s, v2.s[1] \n" "fmla v25.4s, %12.4s, v2.s[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v26.4s, %12.4s, v2.s[3] \n" "fmla v27.4s, %12.4s, v3.s[0] \n" "ld1 {v1.s}[0], [%3] \n" "fmla v24.4s, %13.4s, v2.s[2] \n" "fmla v25.4s, %13.4s, v2.s[3] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v26.4s, %13.4s, v3.s[0] \n" "fmla v27.4s, %13.4s, v3.s[1] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" "pld [%1, #64] \n" "vld1.u16 {d1}, [%1]! 
\n" "vld1.u32 {d2[0]}, [%1] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q8, d0[0] \n" "vmla.f32 q13, %q8, d0[1] \n" "vmla.f32 q14, %q8, d1[0] \n" "vmla.f32 q15, %q8, d1[1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "vmla.f32 q14, %q9, d1[1] \n" "vmla.f32 q15, %q9, d2[0] \n" "vmla.f32 q12, %q10, d1[0] \n" "vmla.f32 q13, %q10, d1[1] \n" "vmla.f32 q14, %q10, d2[0] \n" "vmla.f32 q15, %q10, d2[1] \n" "pld [%2, #64] \n" "vld1.u16 {d5}, [%2]! \n" "vld1.u32 {d3[0]}, [%2] \n" "vshll.u16 q2, d5, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q11, d4[0] \n" "vmla.f32 q13, %q11, d4[1] \n" "vmla.f32 q14, %q11, d5[0] \n" "vmla.f32 q15, %q11, d5[1] \n" "vmla.f32 q12, %q12, d4[1] \n" "vmla.f32 q13, %q12, d5[0] \n" "vmla.f32 q14, %q12, d5[1] \n" "vmla.f32 q15, %q12, d2[0] \n" "vmla.f32 q12, %q13, d5[0] \n" "vmla.f32 q13, %q13, d5[1] \n" "vmla.f32 q14, %q13, d2[0] \n" "vmla.f32 q15, %q13, d2[1] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3]! \n" "vld1.u32 {d2[0]}, [%3] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q14, d0[0] \n" "vmla.f32 q13, %q14, d0[1] \n" "vmla.f32 q14, %q14, d1[0] \n" "vmla.f32 q15, %q14, d1[1] \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "vmla.f32 q14, %q15, d1[1] \n" "vmla.f32 q15, %q15, d2[0] \n" "vmla.f32 q12, %q16, d1[0] \n" "vmla.f32 q13, %q16, d1[1] \n" "vmla.f32 q14, %q16, d2[0] \n" "vmla.f32 q15, %q16, d2[1] \n" "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v28.4s, v29.4s}, 
[%0] \n" "shll v0.4s, v0.4h, #16 \n" "fmul v24.4s, %8.4s, v0.s[0] \n" "fmul v25.4s, %8.4s, v0.s[1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" "fmul v26.4s, %9.4s, v0.s[1] \n" "fmul v27.4s, %9.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v28.4s, %10.4s, v0.s[2] \n" "fmla v29.4s, %10.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v1.s[0] \n" "fmla v25.4s, %11.4s, v1.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "fmla v26.4s, %12.4s, v1.s[1] \n" "fmla v27.4s, %12.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v28.4s, %13.4s, v1.s[2] \n" "fmla v29.4s, %13.4s, v1.s[3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %15.4s, v0.s[1] \n" "fmla v27.4s, %15.4s, v0.s[2] \n" "fmla v28.4s, %16.4s, v0.s[2] \n" "fmla v29.4s, %16.4s, v0.s[3] \n" "add %1, %1, #4 \n" "fadd v24.4s, v24.4s, v26.4s \n" "fadd v25.4s, v25.4s, v27.4s \n" "add %2, %2, #4 \n" "fadd v28.4s, v28.4s, v24.4s \n" "fadd v29.4s, v29.4s, v25.4s \n" "add %3, %3, #4 \n" "st1 {v28.4s, v29.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29"); #else // __aarch64__ asm volatile( "pld [%1, #64] \n" "vld1.u16 {d1}, [%1] \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" "vshll.u16 q0, d1, #16 \n" "vmul.f32 q14, %q8, d0[0] \n" "vmul.f32 q15, %q8, d0[1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "pld [%2, #64] \n" "vld1.u16 {d3}, [%2] \n" "vmla.f32 q14, %q10, d1[0] \n" "vmla.f32 q15, %q10, d1[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q11, d2[0] \n" "vmla.f32 q13, %q11, d2[1] \n" "vmla.f32 q14, %q12, d2[1] \n" "vmla.f32 q15, %q12, d3[0] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3] \n" "vmla.f32 q12, %q13, d3[0] \n" "vmla.f32 
q13, %q13, d3[1] \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q14, %q14, d0[0] \n" "vmla.f32 q15, %q14, d0[1] \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "add %1, %1, #4 \n" "vmla.f32 q14, %q16, d1[0] \n" "vmla.f32 q15, %q16, d1[1] \n" "add %2, %2, #4 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "add %3, %3, #4 \n" "vst1.f32 {d24-d27}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; } r0 += 2; r1 
+= 2; r2 += 2; } k0 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<unsigned short>(0); const unsigned short* r1 = img0.row<unsigned short>(1); const unsigned short* r2 = img0.row<unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n" "ld1 {v2.s}[0], [%2] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "fmla v28.4s, %10.4s, v1.s[0] \n" "fmla v29.4s, %10.4s, v1.s[1] \n" "fmla v30.4s, %10.4s, v1.s[2] \n" "fmla v31.4s, %10.4s, v1.s[3] \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "fmla v26.4s, %11.4s, v0.s[3] \n" "fmla v27.4s, %11.4s, v1.s[0] \n" "fmla v28.4s, %11.4s, v1.s[1] \n" "fmla v29.4s, %11.4s, v1.s[2] \n" "fmla v30.4s, %11.4s, v1.s[3] \n" "fmla v31.4s, %11.4s, v2.s[0] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "fmla v26.4s, %12.4s, v1.s[0] \n" "fmla v27.4s, %12.4s, v1.s[1] \n" "fmla v28.4s, %12.4s, v1.s[2] \n" "fmla 
v29.4s, %12.4s, v1.s[3] \n" "fmla v30.4s, %12.4s, v2.s[0] \n" "fmla v31.4s, %12.4s, v2.s[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4h, v5.4h}, [%3], #16 \n" "ld1 {v2.s}[0], [%3] \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %13.4s, v4.s[0] \n" "fmla v25.4s, %13.4s, v4.s[1] \n" "fmla v26.4s, %13.4s, v4.s[2] \n" "fmla v27.4s, %13.4s, v4.s[3] \n" "fmla v28.4s, %13.4s, v5.s[0] \n" "fmla v29.4s, %13.4s, v5.s[1] \n" "fmla v30.4s, %13.4s, v5.s[2] \n" "fmla v31.4s, %13.4s, v5.s[3] \n" "fmla v24.4s, %14.4s, v4.s[1] \n" "fmla v25.4s, %14.4s, v4.s[2] \n" "fmla v26.4s, %14.4s, v4.s[3] \n" "fmla v27.4s, %14.4s, v5.s[0] \n" "fmla v28.4s, %14.4s, v5.s[1] \n" "fmla v29.4s, %14.4s, v5.s[2] \n" "fmla v30.4s, %14.4s, v5.s[3] \n" "fmla v31.4s, %14.4s, v2.s[0] \n" "fmla v24.4s, %15.4s, v4.s[2] \n" "fmla v25.4s, %15.4s, v4.s[3] \n" "fmla v26.4s, %15.4s, v5.s[0] \n" "fmla v27.4s, %15.4s, v5.s[1] \n" "fmla v28.4s, %15.4s, v5.s[2] \n" "fmla v29.4s, %15.4s, v5.s[3] \n" "fmla v30.4s, %15.4s, v2.s[0] \n" "fmla v31.4s, %15.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n" "ld1 {v2.s}[0], [%4] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "fmla v28.4s, %16.4s, v1.s[0] \n" "fmla v29.4s, %16.4s, v1.s[1] \n" "fmla v30.4s, %16.4s, v1.s[2] \n" "fmla v31.4s, %16.4s, v1.s[3] \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %17.4s, v0.s[3] \n" "fmla v27.4s, %17.4s, v1.s[0] \n" "fmla v28.4s, %17.4s, v1.s[1] \n" "fmla v29.4s, %17.4s, v1.s[2] \n" "fmla v30.4s, %17.4s, v1.s[3] \n" "fmla v31.4s, %17.4s, v2.s[0] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %18.4s, v1.s[0] \n" "fmla v27.4s, %18.4s, v1.s[1] \n" "fmla v28.4s, %18.4s, v1.s[2] \n" "fmla v29.4s, %18.4s, 
v1.s[3] \n" "fmla v30.4s, %18.4s, v2.s[0] \n" "fmla v31.4s, %18.4s, v2.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n" "shll v0.4s, v0.4h, #16 \n" "ld1 {v1.s}[0], [%2] \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3], #8 \n" "fmla v26.4s, %11.4s, v0.s[3] \n" "fmla v27.4s, %11.4s, v1.s[0] \n" "ld1 {v3.s}[0], [%3] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v26.4s, %12.4s, v1.s[0] \n" "fmla v27.4s, %12.4s, v1.s[1] \n" "fmla v24.4s, %13.4s, v2.s[0] \n" "fmla v25.4s, %13.4s, v2.s[1] \n" "fmla v26.4s, %13.4s, v2.s[2] \n" "fmla v27.4s, %13.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %14.4s, v2.s[1] \n" "fmla v25.4s, %14.4s, v2.s[2] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n" "fmla v26.4s, %14.4s, v2.s[3] \n" 
"fmla v27.4s, %14.4s, v3.s[0] \n" "ld1 {v1.s}[0], [%4] \n" "fmla v24.4s, %15.4s, v2.s[2] \n" "fmla v25.4s, %15.4s, v2.s[3] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v26.4s, %15.4s, v3.s[0] \n" "fmla v27.4s, %15.4s, v3.s[1] \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %17.4s, v0.s[3] \n" "fmla v27.4s, %17.4s, v1.s[0] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %18.4s, v1.s[0] \n" "fmla v27.4s, %18.4s, v1.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2]! \n" "vld1.u32 {d2[0]}, [%2] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q10, d0[0] \n" "vmla.f32 q13, %q10, d0[1] \n" "vmla.f32 q14, %q10, d1[0] \n" "vmla.f32 q15, %q10, d1[1] \n" "vmla.f32 q12, %q11, d0[1] \n" "vmla.f32 q13, %q11, d1[0] \n" "vmla.f32 q14, %q11, d1[1] \n" "vmla.f32 q15, %q11, d2[0] \n" "vmla.f32 q12, %q12, d1[0] \n" "vmla.f32 q13, %q12, d1[1] \n" "vmla.f32 q14, %q12, d2[0] \n" "vmla.f32 q15, %q12, d2[1] \n" "pld [%3, #64] \n" "vld1.u16 {d5}, [%3]! 
\n" "vld1.u32 {d3[0]}, [%3] \n" "vshll.u16 q2, d5, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q13, d4[0] \n" "vmla.f32 q13, %q13, d4[1] \n" "vmla.f32 q14, %q13, d5[0] \n" "vmla.f32 q15, %q13, d5[1] \n" "vmla.f32 q12, %q14, d4[1] \n" "vmla.f32 q13, %q14, d5[0] \n" "vmla.f32 q14, %q14, d5[1] \n" "vmla.f32 q15, %q14, d2[0] \n" "vmla.f32 q12, %q15, d5[0] \n" "vmla.f32 q13, %q15, d5[1] \n" "vmla.f32 q14, %q15, d2[0] \n" "vmla.f32 q15, %q15, d2[1] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4]! \n" "vld1.u32 {d2[0]}, [%4] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q16, d0[0] \n" "vmla.f32 q13, %q16, d0[1] \n" "vmla.f32 q14, %q16, d1[0] \n" "vmla.f32 q15, %q16, d1[1] \n" "vmla.f32 q12, %q17, d0[1] \n" "vmla.f32 q13, %q17, d1[0] \n" "vmla.f32 q14, %q17, d1[1] \n" "vmla.f32 q15, %q17, d2[0] \n" "vmla.f32 q12, %q18, d1[0] \n" "vmla.f32 q13, %q18, d1[1] \n" "vmla.f32 q14, %q18, d2[0] \n" "vmla.f32 q15, %q18, d2[1] \n" "vshrn.s32 d24, q12, #16 \n" "vshrn.s32 d25, q13, #16 \n" "vshrn.s32 d26, q14, #16 \n" "vshrn.s32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v28.4s, v29.4s}, [%1], #32 \n" "shll v0.4s, v0.4h, #16 \n" "fmul v24.4s, %10.4s, v0.s[0] \n" "fmul v25.4s, %10.4s, v0.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v1.4h}, [%3] \n" "fmul v26.4s, %11.4s, v0.s[1] \n" "fmul v27.4s, %11.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v28.4s, %12.4s, v0.s[2] \n" "fmla v29.4s, %12.4s, v0.s[3] \n" "fmla v24.4s, %13.4s, v1.s[0] \n" "fmla v25.4s, %13.4s, v1.s[1] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4] \n" "fmla v26.4s, %14.4s, v1.s[1] \n" "fmla v27.4s, %14.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v28.4s, %15.4s, v1.s[2] \n" "fmla v29.4s, %15.4s, v1.s[3] \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %17.4s, v0.s[1] \n" "fmla v27.4s, %17.4s, v0.s[2] \n" "fmla v28.4s, %18.4s, v0.s[2] \n" "fmla v29.4s, %18.4s, v0.s[3] \n" "add %2, %2, #4 \n" "fadd v24.4s, v24.4s, v26.4s \n" "fadd v25.4s, v25.4s, v27.4s \n" "add %3, %3, #4 \n" "fadd v28.4s, v28.4s, v24.4s \n" "fadd v29.4s, v29.4s, v25.4s \n" "add %4, %4, #4 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "st1 {v28.4h, v29.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // 
%18 : "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29"); #else // __aarch64__ asm volatile( "pld [%2, #64] \n" "vld1.u16 {d1}, [%2] \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" "vshll.u16 q0, d1, #16 \n" "vmul.f32 q14, %q10, d0[0] \n" "vmul.f32 q15, %q10, d0[1] \n" "vmla.f32 q12, %q11, d0[1] \n" "vmla.f32 q13, %q11, d1[0] \n" "pld [%3, #64] \n" "vld1.u16 {d3}, [%3] \n" "vmla.f32 q14, %q12, d1[0] \n" "vmla.f32 q15, %q12, d1[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q13, d2[0] \n" "vmla.f32 q13, %q13, d2[1] \n" "vmla.f32 q14, %q14, d2[1] \n" "vmla.f32 q15, %q14, d3[0] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4] \n" "vmla.f32 q12, %q15, d3[0] \n" "vmla.f32 q13, %q15, d3[1] \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q14, %q16, d0[0] \n" "vmla.f32 q15, %q16, d0[1] \n" "vmla.f32 q12, %q17, d0[1] \n" "vmla.f32 q13, %q17, d1[0] \n" "add %2, %2, #4 \n" "vmla.f32 q14, %q18, d1[0] \n" "vmla.f32 q15, %q18, d1[1] \n" "add %3, %3, #4 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "add %4, %4, #4 \n" "vshrn.s32 d24, q12, #16 \n" "vshrn.s32 d25, q13, #16 \n" "vst1.f32 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "q0", "q1", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; outptr0_bf16 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; } } } static void conv3x3s2_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #if 
__ARM_NEON && __aarch64__ Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator); #else Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); #endif const int tailstep = w - 2 * outw + w; const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f); { float* ptr = (float*)out0; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias0); vst1q_f32(ptr + 12, _bias0); vst1q_f32(ptr + 16, _bias1); vst1q_f32(ptr + 20, _bias1); vst1q_f32(ptr + 24, _bias1); vst1q_f32(ptr + 28, _bias1); ptr += 32; } for (; j + 1 < outw; j += 2) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias1); vst1q_f32(ptr + 12, _bias1); ptr += 16; } for (; j < outw; j++) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias1); ptr += 8; } } } const unsigned short* k0 = kernel.channel(p); const unsigned short* k1 = kernel.channel(p + 1); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = 
vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( // r0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" // "prfm pldl1keep, [%0, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum1 "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %8.4s, v0.s[0] \n" "fmla v7.4s, %8.4s, v0.s[2] \n" "fmla v8.4s, %8.4s, v1.s[0] \n" "fmla v9.4s, %8.4s, v1.s[2] \n" "fmla v10.4s, %17.4s, v0.s[0] \n" "fmla v11.4s, %17.4s, v0.s[2] \n" "fmla v12.4s, %17.4s, v1.s[0] \n" "fmla v13.4s, %17.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%1] \n" "fmla v6.4s, %9.4s, v0.s[1] \n" "fmla v7.4s, %9.4s, v0.s[3] \n" "fmla v8.4s, %9.4s, v1.s[1] \n" "fmla v9.4s, %9.4s, v1.s[3] \n" "fmla v10.4s, %18.4s, v0.s[1] \n" "fmla v11.4s, %18.4s, v0.s[3] \n" "fmla v12.4s, %18.4s, v1.s[1] \n" "fmla v13.4s, %18.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.4h, v3.4h}, [%2], #16 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "fmla v8.4s, %10.4s, v1.s[2] \n" "fmla v9.4s, %10.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %19.4s, v0.s[2] \n" "fmla 
v11.4s, %19.4s, v1.s[0] \n" "fmla v12.4s, %19.4s, v1.s[2] \n" "fmla v13.4s, %19.4s, v4.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %11.4s, v2.s[0] \n" "fmla v7.4s, %11.4s, v2.s[2] \n" "fmla v8.4s, %11.4s, v3.s[0] \n" "fmla v9.4s, %11.4s, v3.s[2] \n" "fmla v10.4s, %20.4s, v2.s[0] \n" "fmla v11.4s, %20.4s, v2.s[2] \n" "fmla v12.4s, %20.4s, v3.s[0] \n" "fmla v13.4s, %20.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "fmla v8.4s, %12.4s, v3.s[1] \n" "fmla v9.4s, %12.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v10.4s, %21.4s, v2.s[1] \n" "fmla v11.4s, %21.4s, v2.s[3] \n" "fmla v12.4s, %21.4s, v3.s[1] \n" "fmla v13.4s, %21.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "fmla v6.4s, %13.4s, v2.s[2] \n" "fmla v7.4s, %13.4s, v3.s[0] \n" "fmla v8.4s, %13.4s, v3.s[2] \n" "fmla v9.4s, %13.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %22.4s, v2.s[2] \n" "fmla v11.4s, %22.4s, v3.s[0] \n" "fmla v12.4s, %22.4s, v3.s[2] \n" "fmla v13.4s, %22.4s, v5.s[0] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "fmla v8.4s, %14.4s, v1.s[0] \n" "fmla v9.4s, %14.4s, v1.s[2] \n" "fmla v10.4s, %23.4s, v0.s[0] \n" "fmla v11.4s, %23.4s, v0.s[2] \n" "fmla v12.4s, %23.4s, v1.s[0] \n" "fmla v13.4s, %23.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %15.4s, v0.s[1] \n" "fmla v7.4s, %15.4s, v0.s[3] \n" "fmla v8.4s, %15.4s, v1.s[1] \n" "fmla v9.4s, %15.4s, v1.s[3] \n" "fmla v10.4s, %24.4s, v0.s[1] \n" "fmla v11.4s, %24.4s, v0.s[3] \n" "fmla v12.4s, %24.4s, v1.s[1] \n" "fmla v13.4s, %24.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fmla v8.4s, %16.4s, v1.s[2] \n" "fmla v9.4s, %16.4s, v4.s[0] \n" "sub %0, %0, #64 \n" "fmla v10.4s, %25.4s, v0.s[2] \n" "fmla v11.4s, %25.4s, v1.s[0] \n" "fmla v12.4s, %25.4s, v1.s[2] \n" "fmla v13.4s, %25.4s, v4.s[0] \n" "st1 
{v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( // r0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1 "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %8.4s, v0.s[0] \n" "fmla v11.4s, %8.4s, v0.s[2] \n" "fmla v12.4s, %17.4s, v0.s[0] \n" "fmla v13.4s, %17.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%1] \n" "fmla v10.4s, %9.4s, v0.s[1] \n" "fmla v11.4s, %9.4s, v0.s[3] \n" "fmla v12.4s, %18.4s, v0.s[1] \n" "fmla v13.4s, %18.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v10.4s, %10.4s, v0.s[2] \n" "fmla v11.4s, %10.4s, v1.s[0] \n" "fmla v12.4s, %19.4s, v0.s[2] \n" "fmla v13.4s, %19.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %11.4s, v2.s[0] \n" "fmla v11.4s, %11.4s, v2.s[2] \n" "fmla v12.4s, %20.4s, v2.s[0] \n" "fmla v13.4s, %20.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%2] \n" "fmla v10.4s, %12.4s, v2.s[1] \n" "fmla v11.4s, %12.4s, v2.s[3] \n" "fmla v12.4s, %21.4s, v2.s[1] \n" "fmla v13.4s, %21.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v10.4s, %13.4s, v2.s[2] \n" "fmla v11.4s, %13.4s, v3.s[0] \n" "fmla v12.4s, %22.4s, v2.s[2] \n" "fmla v13.4s, %22.4s, v3.s[0] \n" "shll v0.4s, v0.4h, 
#16 \n" "fmla v10.4s, %14.4s, v0.s[0] \n" "fmla v11.4s, %14.4s, v0.s[2] \n" "fmla v12.4s, %23.4s, v0.s[0] \n" "fmla v13.4s, %23.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v10.4s, %15.4s, v0.s[1] \n" "fmla v11.4s, %15.4s, v0.s[3] \n" "fmla v12.4s, %24.4s, v0.s[1] \n" "fmla v13.4s, %24.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v10.4s, %16.4s, v0.s[2] \n" "fmla v11.4s, %16.4s, v1.s[0] \n" "fmla v12.4s, %25.4s, v0.s[2] \n" "fmla v13.4s, %25.4s, v1.s[0] \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0); _sum1 = 
vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; k1 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); unsigned short* outptr1_bf16 = top_blob.channel(p + 1); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( // r0 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, 
v9.4s}, [%2], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum1 "fmla v6.4s, %12.4s, v0.s[0] \n" "fmla v7.4s, %12.4s, v0.s[2] \n" "fmla v8.4s, %12.4s, v1.s[0] \n" "fmla v9.4s, %12.4s, v1.s[2] \n" "fmla v10.4s, %21.4s, v0.s[0] \n" "fmla v11.4s, %21.4s, v0.s[2] \n" "fmla v12.4s, %21.4s, v1.s[0] \n" "fmla v13.4s, %21.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %13.4s, v0.s[1] \n" "fmla v7.4s, %13.4s, v0.s[3] \n" "fmla v8.4s, %13.4s, v1.s[1] \n" "fmla v9.4s, %13.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v10.4s, %22.4s, v0.s[1] \n" "fmla v11.4s, %22.4s, v0.s[3] \n" "fmla v12.4s, %22.4s, v1.s[1] \n" "fmla v13.4s, %22.4s, v1.s[3] \n" // r1 "prfm pldl1keep, [%4, #128] \n" "ld1 {v2.4h, v3.4h}, [%4], #16 \n" "fmla v6.4s, %14.4s, v0.s[2] \n" "fmla v7.4s, %14.4s, v1.s[0] \n" "fmla v8.4s, %14.4s, v1.s[2] \n" "fmla v9.4s, %14.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %23.4s, v0.s[2] \n" "fmla v11.4s, %23.4s, v1.s[0] \n" "fmla v12.4s, %23.4s, v1.s[2] \n" "fmla v13.4s, %23.4s, v4.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %15.4s, v2.s[0] \n" "fmla v7.4s, %15.4s, v2.s[2] \n" "fmla v8.4s, %15.4s, v3.s[0] \n" "fmla v9.4s, %15.4s, v3.s[2] \n" "fmla v10.4s, %24.4s, v2.s[0] \n" "fmla v11.4s, %24.4s, v2.s[2] \n" "fmla v12.4s, %24.4s, v3.s[0] \n" "fmla v13.4s, %24.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%4] \n" "fmla v6.4s, %16.4s, v2.s[1] \n" "fmla v7.4s, %16.4s, v2.s[3] \n" "fmla v8.4s, %16.4s, v3.s[1] \n" "fmla v9.4s, %16.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v10.4s, %25.4s, v2.s[1] \n" "fmla v11.4s, %25.4s, v2.s[3] \n" "fmla v12.4s, %25.4s, v3.s[1] \n" "fmla v13.4s, %25.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n" "fmla v6.4s, %17.4s, v2.s[2] \n" "fmla v7.4s, %17.4s, v3.s[0] \n" "fmla v8.4s, %17.4s, v3.s[2] \n" "fmla v9.4s, %17.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla 
v10.4s, %26.4s, v2.s[2] \n" "fmla v11.4s, %26.4s, v3.s[0] \n" "fmla v12.4s, %26.4s, v3.s[2] \n" "fmla v13.4s, %26.4s, v5.s[0] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[0] \n" "fmla v7.4s, %18.4s, v0.s[2] \n" "fmla v8.4s, %18.4s, v1.s[0] \n" "fmla v9.4s, %18.4s, v1.s[2] \n" "fmla v10.4s, %27.4s, v0.s[0] \n" "fmla v11.4s, %27.4s, v0.s[2] \n" "fmla v12.4s, %27.4s, v1.s[0] \n" "fmla v13.4s, %27.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%5] \n" "fmla v6.4s, %19.4s, v0.s[1] \n" "fmla v7.4s, %19.4s, v0.s[3] \n" "fmla v8.4s, %19.4s, v1.s[1] \n" "fmla v9.4s, %19.4s, v1.s[3] \n" "fmla v10.4s, %28.4s, v0.s[1] \n" "fmla v11.4s, %28.4s, v0.s[3] \n" "fmla v12.4s, %28.4s, v1.s[1] \n" "fmla v13.4s, %28.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %20.4s, v0.s[2] \n" "fmla v7.4s, %20.4s, v1.s[0] \n" "fmla v8.4s, %20.4s, v1.s[2] \n" "fmla v9.4s, %20.4s, v4.s[0] \n" "fmla v10.4s, %29.4s, v0.s[2] \n" "fmla v11.4s, %29.4s, v1.s[0] \n" "fmla v12.4s, %29.4s, v1.s[2] \n" "fmla v13.4s, %29.4s, v4.s[0] \n" "shrn v6.4h, v6.4s, #16 \n" "shrn v7.4h, v7.4s, #16 \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", 
"v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( // r0 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum0 sum1 "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %12.4s, v0.s[0] \n" "fmla v11.4s, %12.4s, v0.s[2] \n" "fmla v12.4s, %21.4s, v0.s[0] \n" "fmla v13.4s, %21.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v10.4s, %13.4s, v0.s[1] \n" "fmla v11.4s, %13.4s, v0.s[3] \n" "fmla v12.4s, %22.4s, v0.s[1] \n" "fmla v13.4s, %22.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%4, #64] \n" "ld1 {v2.4h}, [%4], #8 \n" "fmla v10.4s, %14.4s, v0.s[2] \n" "fmla v11.4s, %14.4s, v1.s[0] \n" "fmla v12.4s, %23.4s, v0.s[2] \n" "fmla v13.4s, %23.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %15.4s, v2.s[0] \n" "fmla v11.4s, %15.4s, v2.s[2] \n" "fmla v12.4s, %24.4s, v2.s[0] \n" "fmla v13.4s, %24.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%4] \n" "fmla v10.4s, %16.4s, v2.s[1] \n" "fmla v11.4s, %16.4s, v2.s[3] \n" "fmla v12.4s, %25.4s, v2.s[1] \n" "fmla v13.4s, %25.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "fmla v10.4s, %17.4s, v2.s[2] \n" "fmla v11.4s, %17.4s, v3.s[0] \n" "fmla v12.4s, %26.4s, v2.s[2] \n" "fmla v13.4s, %26.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %18.4s, v0.s[0] \n" "fmla v11.4s, %18.4s, v0.s[2] \n" "fmla v12.4s, %27.4s, v0.s[0] \n" "fmla v13.4s, %27.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%5] \n" "fmla v10.4s, %19.4s, v0.s[1] \n" "fmla v11.4s, %19.4s, v0.s[3] \n" "fmla v12.4s, %28.4s, v0.s[1] \n" "fmla v13.4s, %28.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v10.4s, %20.4s, v0.s[2] \n" "fmla v11.4s, %20.4s, v1.s[0] \n" "fmla v12.4s, %29.4s, v0.s[2] \n" "fmla v13.4s, %29.4s, v1.s[0] \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v10.4h, v11.4h}, [%0], #16 \n" "st1 {v12.4h, 
v13.4h}, [%1], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2); vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); vst1_u16(outptr1_bf16, vcvt_bf16_f32(_sum1)); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; outptr0_bf16 += 4; outptr1_bf16 += 4; } r0 += tailstep; r1 
+= tailstep; r2 += tailstep; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const unsigned short* k0 = kernel.channel(p); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0] \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %8.4s, v0.s[0] \n" "fmla v7.4s, %8.4s, v0.s[2] \n" "fmla v8.4s, %8.4s, v1.s[0] \n" "fmla v9.4s, %8.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%1] \n" "fmla v6.4s, %9.4s, v0.s[1] \n" "fmla v7.4s, %9.4s, v0.s[3] \n" "fmla v8.4s, %9.4s, v1.s[1] \n" "fmla v9.4s, %9.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.4h, v3.4h}, [%2], #16 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "fmla v8.4s, %10.4s, v1.s[2] \n" "fmla v9.4s, %10.4s, v4.s[0] 
\n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %11.4s, v2.s[0] \n" "fmla v7.4s, %11.4s, v2.s[2] \n" "fmla v8.4s, %11.4s, v3.s[0] \n" "fmla v9.4s, %11.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "fmla v8.4s, %12.4s, v3.s[1] \n" "fmla v9.4s, %12.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "fmla v6.4s, %13.4s, v2.s[2] \n" "fmla v7.4s, %13.4s, v3.s[0] \n" "fmla v8.4s, %13.4s, v3.s[2] \n" "fmla v9.4s, %13.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "fmla v8.4s, %14.4s, v1.s[0] \n" "fmla v9.4s, %14.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %15.4s, v0.s[1] \n" "fmla v7.4s, %15.4s, v0.s[3] \n" "fmla v8.4s, %15.4s, v1.s[1] \n" "fmla v9.4s, %15.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fmla v8.4s, %16.4s, v1.s[2] \n" "fmla v9.4s, %16.4s, v4.s[0] \n" "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%1, #128] \n" "vld1.u16 {d12-d13}, [%1]! 
\n" "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" // sum0 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%1] \n" "vmla.f32 q0, %q8, d8[0] \n" "vmla.f32 q1, %q8, d9[0] \n" "vmla.f32 q2, %q8, d10[0] \n" "vmla.f32 q3, %q8, d11[0] \n" "vmla.f32 q0, %q9, d8[1] \n" "vmla.f32 q1, %q9, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q9, d10[1] \n" "vmla.f32 q3, %q9, d11[1] \n" // r1 "pld [%2, #128] \n" "vld1.u16 {d12-d13}, [%2]! \n" "vmla.f32 q0, %q10, d9[0] \n" "vmla.f32 q1, %q10, d10[0] \n" "vmla.f32 q2, %q10, d11[0] \n" "vmla.f32 q3, %q10, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%2] \n" "vmla.f32 q0, %q11, d8[0] \n" "vmla.f32 q1, %q11, d9[0] \n" "vmla.f32 q2, %q11, d10[0] \n" "vmla.f32 q3, %q11, d11[0] \n" "vmla.f32 q0, %q12, d8[1] \n" "vmla.f32 q1, %q12, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q12, d10[1] \n" "vmla.f32 q3, %q12, d11[1] \n" // r2 "pld [%3, #128] \n" "vld1.u16 {d12-d13}, [%3]! \n" "vmla.f32 q0, %q13, d9[0] \n" "vmla.f32 q1, %q13, d10[0] \n" "vmla.f32 q2, %q13, d11[0] \n" "vmla.f32 q3, %q13, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%3] \n" "vmla.f32 q0, %q14, d8[0] \n" "vmla.f32 q1, %q14, d9[0] \n" "vmla.f32 q2, %q14, d10[0] \n" "vmla.f32 q3, %q14, d11[0] \n" "vmla.f32 q0, %q15, d8[1] \n" "vmla.f32 q1, %q15, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q15, d10[1] \n" "vmla.f32 q3, %q15, d11[1] \n" "vmla.f32 q0, %q16, d9[0] \n" "vmla.f32 q1, %q16, d10[0] \n" "vmla.f32 q2, %q16, d11[0] \n" "vmla.f32 q3, %q16, d8[0] \n" "vstm %0!, {d0-d7} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 
2) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v8.4s, v9.4s}, [%0] \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "fmul v6.4s, %8.4s, v0.s[0] \n" "fmul v7.4s, %8.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%1] \n" "fmla v8.4s, %9.4s, v0.s[1] \n" "fmla v9.4s, %9.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v8.4s, %11.4s, v2.s[0] \n" "fmla v9.4s, %11.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v8.4s, %13.4s, v2.s[2] \n" "fmla v9.4s, %13.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v8.4s, %15.4s, v0.s[1] \n" "fmla v9.4s, %15.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fadd v8.4s, v8.4s, v6.4s \n" "fadd v9.4s, v9.4s, v7.4s \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%1, #64] \n" "vld1.u16 {d9}, [%1]! \n" "pld [%0, #256] \n" "vld1.f32 {d4-d7}, [%0] \n" // sum0 "vshll.u16 q4, d9, #16 \n" "vmul.f32 q0, %q8, d8[0] \n" "vmul.f32 q1, %q8, d9[0] \n" "vld1.u16 {d11[]}, [%1] \n" "vmla.f32 q2, %q9, d8[1] \n" "vmla.f32 q3, %q9, d9[1] \n" "vshll.u16 q5, d11, #16 \n" // r1 "pld [%2, #64] \n" "vld1.u16 {d13}, [%2]! 
\n" "vmla.f32 q0, %q10, d9[0] \n" "vmla.f32 q1, %q10, d10[0] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q2, %q11, d12[0] \n" "vmla.f32 q3, %q11, d13[0] \n" "vld1.u16 {d9[]}, [%2] \n" "vmla.f32 q0, %q12, d12[1] \n" "vmla.f32 q1, %q12, d13[1] \n" "vshll.u16 q4, d9, #16 \n" // r2 "pld [%3, #64] \n" "vld1.u16 {d11}, [%3]! \n" "vmla.f32 q2, %q13, d13[0] \n" "vmla.f32 q3, %q13, d8[0] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q0, %q14, d10[0] \n" "vmla.f32 q1, %q14, d11[0] \n" "vld1.u16 {d13[]}, [%3] \n" "vmla.f32 q2, %q15, d10[1] \n" "vmla.f32 q3, %q15, d11[1] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q0, %q16, d11[0] \n" "vmla.f32 q1, %q16, d12[0] \n" "vadd.f32 q2, q2, q0 \n" "vadd.f32 q3, q3, q1 \n" "vst1.f32 {d4-d7}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, 
_k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %10.4s, v0.s[0] \n" "fmla v7.4s, %10.4s, v0.s[2] \n" "fmla v8.4s, %10.4s, v1.s[0] \n" "fmla v9.4s, %10.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%2] \n" "fmla v6.4s, %11.4s, v0.s[1] \n" "fmla v7.4s, %11.4s, v0.s[3] \n" "fmla v8.4s, %11.4s, v1.s[1] \n" "fmla v9.4s, %11.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%3, #128] \n" "ld1 {v2.4h, v3.4h}, [%3], #16 \n" "fmla v6.4s, %12.4s, v0.s[2] \n" "fmla v7.4s, %12.4s, v1.s[0] \n" "fmla 
v8.4s, %12.4s, v1.s[2] \n" "fmla v9.4s, %12.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %13.4s, v2.s[0] \n" "fmla v7.4s, %13.4s, v2.s[2] \n" "fmla v8.4s, %13.4s, v3.s[0] \n" "fmla v9.4s, %13.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%3] \n" "fmla v6.4s, %14.4s, v2.s[1] \n" "fmla v7.4s, %14.4s, v2.s[3] \n" "fmla v8.4s, %14.4s, v3.s[1] \n" "fmla v9.4s, %14.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" // r2 "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n" "fmla v6.4s, %15.4s, v2.s[2] \n" "fmla v7.4s, %15.4s, v3.s[0] \n" "fmla v8.4s, %15.4s, v3.s[2] \n" "fmla v9.4s, %15.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[0] \n" "fmla v7.4s, %16.4s, v0.s[2] \n" "fmla v8.4s, %16.4s, v1.s[0] \n" "fmla v9.4s, %16.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%4] \n" "fmla v6.4s, %17.4s, v0.s[1] \n" "fmla v7.4s, %17.4s, v0.s[3] \n" "fmla v8.4s, %17.4s, v1.s[1] \n" "fmla v9.4s, %17.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[2] \n" "fmla v7.4s, %18.4s, v1.s[0] \n" "fmla v8.4s, %18.4s, v1.s[2] \n" "fmla v9.4s, %18.4s, v4.s[0] \n" "shrn v6.4h, v6.4s, #16 \n" "shrn v7.4h, v7.4s, #16 \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%2, #128] \n" "vld1.u16 {d12-d13}, [%2]! 
\n" "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // sum0 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%2] \n" "vmla.f32 q0, %q10, d8[0] \n" "vmla.f32 q1, %q10, d9[0] \n" "vmla.f32 q2, %q10, d10[0] \n" "vmla.f32 q3, %q10, d11[0] \n" "vmla.f32 q0, %q11, d8[1] \n" "vmla.f32 q1, %q11, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q11, d10[1] \n" "vmla.f32 q3, %q11, d11[1] \n" // r1 "pld [%3, #128] \n" "vld1.u16 {d12-d13}, [%3]! \n" "vmla.f32 q0, %q12, d9[0] \n" "vmla.f32 q1, %q12, d10[0] \n" "vmla.f32 q2, %q12, d11[0] \n" "vmla.f32 q3, %q12, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%3] \n" "vmla.f32 q0, %q13, d8[0] \n" "vmla.f32 q1, %q13, d9[0] \n" "vmla.f32 q2, %q13, d10[0] \n" "vmla.f32 q3, %q13, d11[0] \n" "vmla.f32 q0, %q14, d8[1] \n" "vmla.f32 q1, %q14, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q14, d10[1] \n" "vmla.f32 q3, %q14, d11[1] \n" // r2 "pld [%4, #128] \n" "vld1.u16 {d12-d13}, [%4]! \n" "vmla.f32 q0, %q15, d9[0] \n" "vmla.f32 q1, %q15, d10[0] \n" "vmla.f32 q2, %q15, d11[0] \n" "vmla.f32 q3, %q15, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%4] \n" "vmla.f32 q0, %q16, d8[0] \n" "vmla.f32 q1, %q16, d9[0] \n" "vmla.f32 q2, %q16, d10[0] \n" "vmla.f32 q3, %q16, d11[0] \n" "vmla.f32 q0, %q17, d8[1] \n" "vmla.f32 q1, %q17, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q17, d10[1] \n" "vmla.f32 q3, %q17, d11[1] \n" "vmla.f32 q0, %q18, d9[0] \n" "vmla.f32 q1, %q18, d10[0] \n" "vmla.f32 q2, %q18, d11[0] \n" "vmla.f32 q3, %q18, d8[0] \n" "vshrn.u32 d0, q0, #16 \n" "vshrn.u32 d1, q1, #16 \n" "vshrn.u32 d2, q2, #16 \n" "vshrn.u32 d3, q3, #16 \n" "vst1.u16 {d0-d3}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "fmul v6.4s, %10.4s, v0.s[0] \n" "fmul v7.4s, %10.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%2] \n" "fmla v8.4s, %11.4s, v0.s[1] \n" "fmla v9.4s, %11.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3], #8 \n" "fmla v6.4s, %12.4s, v0.s[2] \n" "fmla v7.4s, %12.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v8.4s, %13.4s, v2.s[0] \n" "fmla v9.4s, %13.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%3] \n" "fmla v6.4s, %14.4s, v2.s[1] \n" "fmla v7.4s, %14.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n" "fmla v8.4s, %15.4s, v2.s[2] \n" "fmla v9.4s, %15.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[0] \n" "fmla v7.4s, %16.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%4] \n" "fmla v8.4s, %17.4s, v0.s[1] \n" "fmla v9.4s, %17.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[2] \n" "fmla v7.4s, %18.4s, v1.s[0] \n" "fadd v8.4s, v8.4s, v6.4s \n" "fadd v9.4s, v9.4s, v7.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 
"w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%2, #64] \n" "vld1.u16 {d9}, [%2]! \n" "pld [%1, #256] \n" "vld1.f32 {d4-d7}, [%1]! \n" // sum0 "vshll.u16 q4, d9, #16 \n" "vmul.f32 q0, %q10, d8[0] \n" "vmul.f32 q1, %q10, d9[0] \n" "vld1.u16 {d11[]}, [%2] \n" "vmla.f32 q2, %q11, d8[1] \n" "vmla.f32 q3, %q11, d9[1] \n" "vshll.u16 q5, d11, #16 \n" // r1 "pld [%3, #64] \n" "vld1.u16 {d13}, [%3]! \n" "vmla.f32 q0, %q12, d9[0] \n" "vmla.f32 q1, %q12, d10[0] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q2, %q13, d12[0] \n" "vmla.f32 q3, %q13, d13[0] \n" "vld1.u16 {d9[]}, [%3] \n" "vmla.f32 q0, %q14, d12[1] \n" "vmla.f32 q1, %q14, d13[1] \n" "vshll.u16 q4, d9, #16 \n" // r2 "pld [%4, #64] \n" "vld1.u16 {d11}, [%4]! \n" "vmla.f32 q2, %q15, d13[0] \n" "vmla.f32 q3, %q15, d8[0] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q0, %q16, d10[0] \n" "vmla.f32 q1, %q16, d11[0] \n" "vld1.u16 {d13[]}, [%4] \n" "vmla.f32 q2, %q17, d10[1] \n" "vmla.f32 q3, %q17, d11[1] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q0, %q18, d11[0] \n" "vmla.f32 q1, %q18, d12[0] \n" "vadd.f32 q2, q2, q0 \n" "vadd.f32 q3, q3, q1 \n" "vshrn.u32 d2, q2, #16 \n" "vshrn.u32 d3, q3, #16 \n" "vst1.u16 {d2-d3}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; outptr0_bf16 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; } } }
// ---- begin concatenated file: GB_binop__bget_uint8.c ----
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint8) // C=scalar+B GB (_bind1st__bget_uint8) // C=scalar+B' GB (_bind1st_tran__bget_uint8) // C=A+scalar GB (_bind2nd__bget_uint8) // C=A'+scalar GB (_bind2nd_tran__bget_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ; \ } GrB_Info GB (_bind1st_tran__bget_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_taskloop_num_tasks.c
// This test is known to be fragile on NetBSD kernel at the moment. // UNSUPPORTED: netbsd // RUN: %libomp-compile-and-run // RUN: %libomp-compile && env KMP_TASKLOOP_MIN_TASKS=1 %libomp-run // These compilers don't support the taskloop construct // UNSUPPORTED: gcc-4, gcc-5, icc-16 // This test is known to be fragile on NetBSD kernel at the moment, // https://bugs.llvm.org/show_bug.cgi?id=42020. // UNSUPPORTED: netbsd /* * Test for taskloop * Method: caculate how many times the iteration space is dispatched * and judge if each dispatch has the requested grainsize * It is possible for two adjacent chunks are executed by the same thread */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include "omp_testsuite.h" #define CFDMAX_SIZE 1120 int test_omp_taskloop_num_tasks() { int i; int *tids; int *tidsArray; int count; int result = 0; int num_tasks; for (num_tasks = 1; num_tasks < 120; ++num_tasks) { count = 0; tidsArray = (int *)malloc(sizeof(int) * CFDMAX_SIZE); tids = tidsArray; #pragma omp parallel shared(tids) { int i; #pragma omp master #pragma omp taskloop num_tasks(num_tasks) for (i = 0; i < CFDMAX_SIZE; i++) { tids[i] = omp_get_thread_num(); } } for (i = 0; i < CFDMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } if (count > num_tasks) { fprintf(stderr, "counted too many tasks: (wanted %d, got %d)\n", num_tasks, count); result++; } } return (result==0); } int main() { int i; int num_failed=0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_taskloop_num_tasks()) { num_failed++; } } return num_failed; }
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/xml-tree.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image, % const size_t width,const size_t height, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o offset: the mean offset. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict threshold_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; 
channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); channel_sum.red+=r[width-1].red; channel_sum.green+=r[width-1].green; channel_sum.blue+=r[width-1].blue; channel_sum.opacity+=r[width-1].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+ width-1); r+=image->columns+width; } mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset); mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset); mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset); mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset); if (image->colorspace == CMYKColorspace) mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset); SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ? 0 : QuantumRange); SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ? 0 : QuantumRange); SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ? 0 : QuantumRange); SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ? 0 : QuantumRange); if (image->colorspace == CMYKColorspace) SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex( threshold_indexes+x) <= mean.index) ? 
0 : QuantumRange)); p++; q++; } sync=SyncCacheViewAuthenticPixels(threshold_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveThresholdImage) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically selects a threshold and replaces each % pixel in the image with a black pixel if the image intentsity is less than % the selected threshold otherwise white. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. 
*/ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. 
*/
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    /* cumulative class probability and cumulative mean, per bin */
    omega[i]=omega[i-1]+probability[i];
    myu[i]=myu[i-1]+i*probability[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  /* result is expressed as a percentage of the intensity range */
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold() locates the threshold as the histogram bin with the
  maximum perpendicular distance to the line joining the histogram peak and
  the far end of the histogram.  Returns the threshold as a percentage of
  MaxIntensity.
*/
static double TriangleThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.  The baseline runs from the peak to
    whichever side of the histogram is farther from the peak.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  /* line through (x1,y1)-(x2,y2) in implicit form a*x+b*y+c=0 */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

/*
  AutoThresholdImage() automatically selects a threshold (Kapur, OTSU, or
  Triangle method) from the image histogram and bilevels the image with it.
  Returns MagickTrue on success; the chosen threshold (as a percentage) is
  recorded in the "auto-threshold:threshold" image property.
*/
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property);
  return(BilevelImage(image,QuantumRange*threshold/100.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as operator using LevelImageChannels()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: threshold all default channels */
  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /* threshold on gray intensity; replicate result to R, G, and B */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /* threshold each requested channel independently */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ?
                OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType BlackThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: threshold all default channels */
  status=BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the threshold geometry string; unspecified channels default to the
    red (first) value.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* values were given as percentages of the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* any channel value strictly below its threshold is forced to black */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  
%  ClampImage() set each pixel whose value is below zero to zero and any the
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  /* convenience wrapper: clamp all default channels */
  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* palette image: clamping the colormap covers every pixel */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImageChannel)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate
the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Search the <threshold> elements for a matching "map" or "alias" attribute.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /* The map has been found -- allocate a Threshold Map to return */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      if (p == content)
        {
          /* strtol consumed nothing: fewer values than width*height */
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* a successful extra parse means the map has surplus values */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() load and search one or more threshold map files for the
%  a map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id:  ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  ThresholdMap
    *map;

  /* the built-in minimal map list is consulted before configure files */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
  options=GetConfigureOptions(ThresholdsFilename,exception);
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),map_id,exception);
    if (map != (ThresholdMap *) NULL)
      break;
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,
    *threshold,
    *description;

  const char
    *map,
    *alias,
    *content;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if ( content == (char *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* accumulate per-file success so a single bad file flags failure */
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
%
%  WARNING: This function is deprecated, and is now just a call to
%  the more powerful OrderedPosterizeImage(); function.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image)
%      MagickBooleanType OrderedDitherImageChannel(Image *image,
%        const ChannelType channel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image)
{
  MagickBooleanType
    status;

  /* convenience wrapper: dither all default channels */
  status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception);
  return(status);
}

MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Call the augmented function OrderedPosterizeImage() with the classic
    8x8 ordered-dither map.
  */
  status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d P o s t e r i z e I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedPosterizeImage() will perform a ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedPosterizeImage method is:
%
%      MagickBooleanType OrderedPosterizeImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%      MagickBooleanType OrderedPosterizeImageChannel(Image *image,
%        const ChannelType channel,const char *threshold_map,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with a ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  OrderedPosterizeImage(): ordered-dither posterization over the default
  channel set; thin wrapper around OrderedPosterizeImageChannel().
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map,
    exception);
  return(status);
}

/*
  OrderedPosterizeImageChannel(): posterize the selected channels using the
  named ordered-dither threshold map.  threshold_map is the map name,
  optionally followed by comma-separated per-channel level counts
  (e.g. "o4x4,3,3").
*/
MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image,
  const ChannelType channel,const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  LongPixelPacket
    levels;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  {
    /*
      Extract the map name (text up to the first space or comma) and look up
      the corresponding threshold map.
    */
    char
      token[MaxTextExtent];

    register const char
      *p;

    p=(char *)threshold_map;
    while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
        (*p != '\0'))
      p++;
    threshold_map=p;
    while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
        (*p != '\0')) {
      if ((p-threshold_map) >= (MaxTextExtent-1))
        break;
      token[p-threshold_map] = *p;
      p++;
    }
    token[p-threshold_map] = '\0';
    map = GetThresholdMap(token, exception);
    if ( map == (ThresholdMap *) NULL ) {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  }
  /* Set channel levels from extra comma separated arguments
     Default to 2, the single value given, or individual channel values
  */
#if 1
  { /* parse directly as a comma separated list of integers */
    char *p;

    p = strchr((char *) threshold_map,',');
    if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) )
      levels.index = (unsigned int) strtoul(p, &p, 10);
    else
      levels.index = 2;

    /* Replicate the single level count into each requested channel. */
    levels.red     = ((channel & RedChannel ) != 0) ? levels.index : 0;
    levels.green   = ((channel & GreenChannel) != 0) ? levels.index : 0;
    levels.blue    = ((channel & BlueChannel) != 0) ? levels.index : 0;
    levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0;
    levels.index   = ((channel & IndexChannel) != 0
        && (image->colorspace == CMYKColorspace)) ? levels.index : 0;

    /* if more than a single number, each channel has a separate value */
    if ( p != (char *) NULL && *p == ',' ) {
      p=strchr((char *) threshold_map,',');
      p++;
      /* Comma operator idiom: consume the value, then step over one
         trailing comma if present. */
      if ((channel & RedChannel) != 0)
        levels.red = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & GreenChannel) != 0)
        levels.green = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & BlueChannel) != 0)
        levels.blue = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace)
        levels.index=(unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
      if ((channel & OpacityChannel) != 0)
        levels.opacity = (unsigned int) strtoul(p, &p, 10),
          (void)(*p == ',' && p++);
    }
  }
#else
  /* Parse level values as a geometry */
  /* This difficult!
   * How to map GeometryInfo structure elements into
   * LongPixelPacket structure elements, but according to channel?
   * Note the channels list may skip elements!!!!
   * EG  -channel BA  -ordered-dither map,2,3
   * will need to map  g.rho -> l.blue, and g.sigma -> l.opacity
   * A simpler way is needed, probably converting geometry to a temporary
   * array, then using channel to advance the index into ssize_t pixel packet.
   */
#endif
#if 0
printf("DEBUG levels  r=%u g=%u b=%u a=%u i=%u\n",
  levels.red, levels.green, levels.blue, levels.opacity, levels.index);
#endif

  { /* Do the posterized ordered dithering of the image */
    ssize_t
      d;

    /* d = number of psuedo-level divisions added between color levels */
    d = map->divisor-1;

    /* reduce levels to levels - 1 */
    levels.red     = levels.red     ? levels.red-1     : 0;
    levels.green   = levels.green   ? levels.green-1   : 0;
    levels.blue    = levels.blue    ? levels.blue-1    : 0;
    levels.opacity = levels.opacity ? levels.opacity-1 : 0;
    levels.index   = levels.index   ? levels.index-1   : 0;

    if (SetImageStorageClass(image,DirectClass) == MagickFalse)
      {
        /* NOTE(review): `map` is not destroyed on this error path -- looks
           like a small leak; confirm against upstream. */
        InheritException(exception,&image->exception);
        return(MagickFalse);
      }
    status=MagickTrue;
    progress=0;
    image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          threshold,
          t,
          l;

        /*
          Figure out the dither threshold for this pixel
          This must be a integer from 1 to map->divisor-1
        */
        threshold = map->levels[(x%map->width) +map->width*(y%map->height)];

        /* Dither each channel in the image as appropriate
          Notes on the integer Math...
              total number of divisions = (levels-1)*(divisor-1)+1)
              t1 = this colors psuedo_level =
                      q->red * total_divisions / (QuantumRange+1)
              l = posterization level       0..levels
              t = dither threshold level    0..divisor-1  NB: 0 only on last
              Each color_level is of size   QuantumRange / (levels-1)
              NB: All input levels and divisor are already had 1 subtracted
              Opacity is inverted so 'off' represents transparent.
        */
        if (levels.red) {
          t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1));
          l = t/d;  t = t-l*d;
          SetPixelRed(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red)));
        }
        if (levels.green) {
          t = (ssize_t) (QuantumScale*GetPixelGreen(q)*
            (levels.green*d+1));
          l = t/d;  t = t-l*d;
          SetPixelGreen(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green)));
        }
        if (levels.blue) {
          t = (ssize_t) (QuantumScale*GetPixelBlue(q)*
            (levels.blue*d+1));
          l = t/d;  t = t-l*d;
          SetPixelBlue(q,ClampToQuantum((MagickRealType)
            ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue)));
        }
        if (levels.opacity) {
          /* Opacity is inverted before and after dithering so that 'off'
             stays transparent. */
          t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))*
            (levels.opacity*d+1));
          l = t/d;  t = t-l*d;
          SetPixelOpacity(q,ClampToQuantum((MagickRealType)
            ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/
            levels.opacity)));
        }
        if (levels.index) {
          t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)*
            (levels.index*d+1));
          l = t/d;  t = t-l*d;
          SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+
            (t>=threshold))*(MagickRealType) QuantumRange/levels.index)));
        }
        q++;
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp critical (MagickCore_OrderedPosterizeImageChannel)
#endif
          proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    image_view=DestroyCacheView(image_view);
  }
  map=DestroyThresholdMap(map);
  /* NOTE(review): returns MagickTrue unconditionally; the per-row `status`
     accumulated above is discarded -- confirm this matches the intended
     contract. */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() set each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever
  is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImageChannel method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/

/* Clamp a quantum away from zero: values whose magnitude is below epsilon
   are replaced by +/-epsilon, preserving sign. */
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  MagickBooleanType
    status;

  status=PerceptibleImageChannel(image,DefaultChannels,epsilon);
  return(status);
}

MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* Palette image: it suffices to fix the colormap entries. */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImageChannel)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
%      is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* Percentage thresholds: scale into the quantum range. */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /* All channels requested: threshold on intensity and produce a
         two-entry colormap image. */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /* Clamp the random threshold into [min,max]; the per-thread RNG
             keeps the loop reproducible under OpenMP. */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else
            if (intensity > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType)(QuantumRange*
                GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /* Per-channel thresholding path. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* First pick a (clamped or random) threshold per selected channel... */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* ...then binarize each channel against its threshold. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ?
          0 : QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ?
          0 : QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W h i t e T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType WhiteThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  WhiteThresholdImage(): threshold over the default channels, reporting
  errors into the image's own exception.
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  status=WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse per-channel thresholds from the geometry string; any channel value
    left unspecified defaults to the red (first) value.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* Percent syntax: scale every channel threshold to the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  /* A non-gray threshold on a gray image forces promotion to sRGB. */
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pixels above the threshold saturate to white; others are kept. */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x)) > threshold.index)
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImageChannel)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
covariance.c
/** * covariance.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "COVAR" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 1.05 #ifdef RUN_POLYBENCH_SIZE #define SIZE 2048 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif /* Problem size */ #define M SIZE #define N SIZE #define sqrt_of_array_cell(x, j) sqrt(x[j]) #define FLOAT_N 3214212.01 #define EPS 0.005 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *data) { int i, j; for (i = 1; i < (M + 1); i++) { for (j = 1; j < (N + 1); j++) { data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / M; } } } int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) { int i, j, fail; fail = 0; for (i = 1; i < (M + 1); i++) { for (j = 1; j < (N + 1); j++) { if (percentDiff(symmat[i * (N + 1) + j], symmat_outputFromGpu[i * (N + 1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void covariance(DATA_TYPE *data, DATA_TYPE *symmat, DATA_TYPE *mean) { int i, j, j1, j2; /* Determine mean of column vectors of input data matrix */ for (j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. 
*/ for (i = 1; i < (N + 1); i++) { for (j = 1; j < (M + 1); j++) { data[i * (M + 1) + j] -= mean[j]; } } /* Calculate the m * m covariance matrix. */ for (j1 = 1; j1 < (M + 1); j1++) { for (j2 = j1; j2 < (M + 1); j2++) { symmat[j1 * (M + 1) + j2] = 0.0; for (i = 1; i < N + 1; i++) { symmat[j1 * (M + 1) + j2] += data[i * (M + 1) + j1] * data[i * (M + 1) + j2]; } symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2]; } } } void covariance_OMP(DATA_TYPE *data, DATA_TYPE *data2, DATA_TYPE *symmat, DATA_TYPE *mean) { /* Determine mean of column vectors of input data matrix */ #pragma omp target \ data map(to: data[:(M+1)*(N+1)]) \ map(alloc: data2[:(M+1)*(N+1)]) \ map(alloc: mean[:(M+1)]) \ map(tofrom: symmat[:(M+1)*(N+1)]) \ device(DEVICE_ID) { #pragma omp target teams distribute parallel for device(DEVICE_ID) for (int j = 1; j < (M + 1); j++) { mean[j] = 0.0; for (int i = 1; i < (N + 1); i++) { mean[j] += data[i * (M + 1) + j]; } mean[j] /= FLOAT_N; } /* Center the column vectors. */ #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (int i = 1; i < (N + 1); i++) { for (int j = 1; j < (M + 1); j++) { data2[i * (M + 1) + j] = data[i * (M + 1) + j] - mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute parallel for device(DEVICE_ID) for (int j1 = 1; j1 < (M + 1); j1++) { for (int j2 = j1; j2 < (M + 1); j2++) { symmat[j1 * (M + 1) + j2] = 0.0; for (int i = 1; i < N + 1; i++) { symmat[j1 * (M + 1) + j2] += data2[i * (M + 1) + j1] * data2[i * (M + 1) + j2]; } symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2]; } } } } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *data; DATA_TYPE *data_GPU; DATA_TYPE *data2_GPU; DATA_TYPE *symmat; DATA_TYPE *mean; DATA_TYPE *mean_GPU; DATA_TYPE *symmat_outputFromGpu; data = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE)); data2_GPU = (DATA_TYPE *)calloc((M + 1) * (N + 1), sizeof(DATA_TYPE)); symmat = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE)); mean = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE)); symmat_outputFromGpu = (DATA_TYPE *)calloc((M + 1) * (M + 1), sizeof(DATA_TYPE)); mean_GPU = (DATA_TYPE *)calloc((M + 1), sizeof(DATA_TYPE)); //fprintf(stdout, "<< Covariance Computation size: %d>>\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_arrays(data); t_start = rtclock(); covariance_OMP(data, data2_GPU, symmat_outputFromGpu, mean_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); covariance(data, symmat, mean); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(symmat, symmat_outputFromGpu); #endif free(data); free(symmat); free(mean); free(symmat_outputFromGpu); return fail; }
control_tool_no_ompt_support.c
// RUN: %libomp-compile-and-run #include <omp.h> int main() { #pragma omp parallel num_threads(1) { omp_control_tool(omp_control_tool_flush, 1, NULL); } return 0; }
util.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #include "util.h" FLOAT_TYPE myrand() { return rand()/(1.0*RAND_MAX)-0.5; } void set_poly(const int n, FLOAT_TYPE * const restrict P) { int i; srand(time(NULL)); P[0] = 0.0; for (i=1; i<=n ; ++i) { P[i] = myrand()/n; } } void print_poly(const int n, const FLOAT_TYPE *P) { int i; printf("Sum = "); for (i=0; i<=n ; ++i) { printf("%+10.4e*x^%i ",P[i],i); } printf("\n"); } void set_matrix(const size_t n, FLOAT_TYPE * const restrict A) { size_t j, k; const FLOAT_TYPE value = 1.0; /* Parallel initialization to use "first-touch" * memory binding to threads */ #pragma omp parallel for private(j,k) schedule(static,LB_LEN) for (j = 0; j < n; j += TMPLEN*CHUNKSIZE) { for (k = j; k < j+TMPLEN*CHUNKSIZE; k++) { A[k] = value; } } }
sdf.h
/*
 * Generate SDF
 * by R. Falque
 * 11/02/2020
 */

#ifndef SDF_H
#define SDF_H

#include <Eigen/Core>
#include <unsupported/Eigen/CXX11/Tensor>

#include <vector>
#include <string>
#include <iostream>
#include <sstream>

#include "EigenTools/getMinMax.h"
#include "EigenTools/nanoflannWrapper.h"
#include "sgn.h"

#include "IO/writePNG.h"
#include "IO/process_folder.h"

// polyscope wrapper
/*
 * SDF: builds a signed distance field on a regular grid around an oriented
 * point cloud.  The sign of each sample is taken from the dot product
 * between the offset to the nearest point and that point's normal
 * (assumes `vertices_`/`normals_` are stored one point per column --
 * TODO confirm against callers).
 */
class SDF
{
  private:
    Eigen::MatrixXd vertices_;        // input point cloud (accessed column-wise)
    Eigen::MatrixXd normals_;         // per-point outward normals (same layout)
    int grid_resolution_;             // samples along the longest bounding-box axis
    double bounding_box_scale_;       // padding factor applied to the bounding box

    Eigen::Tensor<double, 3> SDF_;    // the sampled signed distance field
    double grid_size_;                // edge length of one voxel
    Eigen::Vector3d source_;          // world position of grid sample (0,0,0)

  public:

    SDF(Eigen::MatrixXd & vertices,
        Eigen::MatrixXd & normals,
        int grid_resolution,
        double bounding_box_scale)
    {
      vertices_ = vertices;
      normals_ = normals;
      grid_resolution_ = grid_resolution;
      bounding_box_scale_ = bounding_box_scale;
      init();
    }

    // destructor
    ~SDF()
    {
    }

    //accessors
    inline Eigen::Tensor<double, 3> get_SDF(){return SDF_;};
    inline double get_grid_size(){return grid_size_;};
    inline Eigen::Vector3d get_source(){return source_;};

    // Sample the signed distance at every grid node via nearest-neighbour
    // lookups in a k-d tree.
    void init() {
      Eigen::Vector3d min_point, max_point;
      getMinMax(vertices_, min_point, max_point);

      //double bounding_box_size = (max_point - min_point).norm() * bounding_box_scale;
      double bounding_box_size = (max_point - min_point).maxCoeff() * bounding_box_scale_; // diagonal versus max direction

      double leaf_size = bounding_box_size/(grid_resolution_-1);
      double inv_leaf_size = 1.0/leaf_size;

      // Quantize the bounding box onto the voxel lattice.
      Eigen::Vector3i min_box, max_box, number_of_bins;
      min_box << floor(min_point(0)*inv_leaf_size), floor(min_point(1)*inv_leaf_size) , floor(min_point(2)*inv_leaf_size);
      max_box << floor(max_point(0)*inv_leaf_size), floor(max_point(1)*inv_leaf_size) , floor(max_point(2)*inv_leaf_size);
      number_of_bins << max_box(0) - min_box(0) + 1, max_box(1) - min_box(1) + 1, max_box(2) - min_box(2) + 1;

      SDF_.resize(number_of_bins(0), number_of_bins(1), number_of_bins(2));

      nanoflann_wrapper tree(vertices_);

      // Only the innermost loop is parallelized; each z-iteration is independent.
      for (int x = 0; x < number_of_bins(0); ++x)
        for (int y = 0; y < number_of_bins(1); ++y) {
          #pragma omp parallel for
          for (int z = 0; z < number_of_bins(2); ++z) {
            std::vector< int > closest_point;
            Eigen::Vector3d point;
            point << x, y, z;
            point *= leaf_size;
            point += min_point;

            closest_point = tree.return_k_closest_points(point, 1);

            // Sign from the normal of the nearest point: positive when the
            // sample lies on the inward side of the surface.
            double sign = ( vertices_.col(closest_point[0]) - point ).dot( normals_.col(closest_point[0]) );
            // NOTE(review): divides by abs(sign) -- if the dot product is
            // exactly 0 this yields NaN, and unqualified abs() may bind to
            // the integer overload on some toolchains; confirm.
            sign /= abs(sign);

            SDF_(x, y, z) = ( vertices_.col(closest_point[0]) - point ).norm() * sign;
          }
        }

      grid_size_ = leaf_size;
      source_ = min_point;
    }

    // Export the grid as a 6-connected lattice graph: one vertex per voxel
    // centre (3 x V), one edge per adjacent voxel pair (2 x E, both
    // directions emitted).
    inline bool generate_graph(Eigen::MatrixXd & vertices, Eigen::MatrixXi & edges) {
        std::vector< Eigen::Vector3d > vertices_vector;
        std::vector< Eigen::Vector2i > edges_vector;
        Eigen::Vector3d centroid;
        Eigen::Tensor<int, 3> grid_indices(SDF_.dimension(0), SDF_.dimension(1), SDF_.dimension(2));

        // build vertices
        for (int x = 0; x < SDF_.dimension(0); ++x)
            for (int y = 0; y < SDF_.dimension(1); ++y)
                for (int z = 0; z < SDF_.dimension(2); ++z) {
                    centroid << x, y, z;
                    centroid *= grid_size_;
                    centroid += source_;
                    vertices_vector.push_back(centroid);
                    // NOTE(review): size() is taken after push_back, so the
                    // stored id is 1-based while the output vertex matrix is
                    // 0-indexed -- looks like an off-by-one; confirm how
                    // consumers interpret these ids.
                    grid_indices(x, y, z) = vertices_vector.size();
                }

        // build edges
        for (int x = 0; x < SDF_.dimension(0); ++x)
            for (int y = 0; y < SDF_.dimension(1); ++y)
                for (int z = 0; z < SDF_.dimension(2); ++z) {
                    // case on x
                    if (x-1>=0) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x-1, y, z);
                        edges_vector.push_back(edge_temp);
                    }
                    if (x+1<SDF_.dimension(0)) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x+1, y, z);
                        edges_vector.push_back(edge_temp);
                    }

                    // case on y
                    if (y-1>=0) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x, y-1, z);
                        edges_vector.push_back(edge_temp);
                    }
                    if (y+1<SDF_.dimension(1)) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x, y+1, z);
                        edges_vector.push_back(edge_temp);
                    }

                    // case on z
                    if (z-1>=0) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x, y, z-1);
                        edges_vector.push_back(edge_temp);
                    }
                    if (z+1<SDF_.dimension(2)) {
                        Eigen::Vector2i edge_temp;
                        edge_temp << grid_indices(x, y, z), grid_indices(x, y, z+1);
                        edges_vector.push_back(edge_temp);
                    }
                }

        vertices.resize(3, vertices_vector.size());
        for (int i=0; i< vertices_vector.size(); i++)
            vertices.col(i) = vertices_vector[i];

        edges.resize(2, edges_vector.size());
        for (int i=0; i< edges_vector.size(); i++)
            edges.col(i) = edges_vector[i];

        return true;
    };

    // Write every z-slice of the field as a PNG (positive values in the red
    // channel, negative in green/blue), emptying the target folder first.
    inline bool print_to_folder(std::string folder_name) {
        bool folder_exist = does_folder_exist(folder_name);
        if (!folder_exist)
        {
            std::cout << "Error: the folder does not exist\n";
            create_folder(folder_name);
        }
        empty_folder(folder_name);

        #pragma omp parallel for
        for (int i=0; i<SDF_.dimension(2); i++) {
            Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> slice;
            Eigen::Tensor<double, 2> tensor_slice;

            Eigen::array<long int,3> offset = {0,0,i};                                  //Starting point
            Eigen::array<long int,3> extent = {SDF_.dimension(0),SDF_.dimension(1),0};  //Finish point
            tensor_slice = SDF_.slice(offset, extent).reshape(Eigen::array<long int,2>{SDF_.dimension(0),SDF_.dimension(1)});

            slice = Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> (tensor_slice.data(), tensor_slice.dimension(0),tensor_slice.dimension(1));
            //slice /= slice.maxCoeff() ;

            // Split sign into channels, each normalized independently.
            Eigen::MatrixXd R, G, B;
            R = slice;
            R = R.cwiseMax(0);
            R /= R.maxCoeff();
            G = -slice;
            G = G.cwiseMax(0);
            G /= G.maxCoeff();
            B = G;

            // Zero-padded 3-digit slice index for lexicographic file order.
            std::stringstream ss;
            ss << std::setw(3) << std::setfill('0') << i;
            std::string s = ss.str();

            std::string file_name = folder_name + s + ".png";
            writePNG(R, G, B, file_name);
        }

        std::cout << "Progress: Stack of images written in :" << folder_name << std::endl;

        return true;
    };

};

#endif
dftcommon.c
// Copyright Naoki Shibata and contributors 2010 - 2021.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <inttypes.h>
#include <assert.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "misc.h"
#include "sleef.h"

#define IMPORT_IS_EXPORT
#include "sleefdft.h"

#include "dispatchparam.h"
#include "dftcommon.h"
#include "common.h"
#include "arraymap.h"

/* Magic numbers stored in SleefDFT.magic to tag the plan type
   (1-D float/double vs 2-D float/double) and detect stale pointers. */
#define MAGIC_FLOAT 0x31415926
#define MAGIC_DOUBLE 0x27182818
#define MAGIC2D_FLOAT 0x22360679
#define MAGIC2D_DOUBLE 0x17320508

/* Human-readable names of the four butterfly execution configurations;
   indices match the `config` values used throughout this file. */
const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" };

/* Parse a path string such as "4(MT) 3 2(ST stream)" into per-step
 * butterfly widths (path[]) and configurations (config[]).
 * Returns the number of steps parsed, or a negative error code:
 *  -1 non-digit where a width was expected, -2 too many steps,
 *  -3 unknown config name, -4 missing ')', -5 widths do not sum to
 *  log2len, -6 width exceeds MAXBUTWIDTH. */
static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) {
  int pathLen = 0, l2l = 0;

  for(;;) {
    while(*p == ' ') p++;
    if (*p == '\0') break;
    if (!isdigit((int)*p)) return -1;
    pathLen++;
    if (pathLen >= pathLenMax) return -2;
    int n = 0;
    while(isdigit((int)*p)) n = n * 10 + *p++ - '0';
    if (n > MAXBUTWIDTH) return -6;
    path[pathLen-1] = n;
    l2l += n;
    config[pathLen-1] = 0;
    if (*p != '(') continue;
    /* Scan config names longest-index-first so e.g. "MT stream" is
       matched before "MT" would be. */
    int c;
    for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break;
    if (c == -1) return -3;
    p += strlen(configStr[c]) + 1;
    if (*p != ')') return -4;
    p++;
    config[pathLen-1] = c;
  }

  if (l2l != log2len) return -5;
  return pathLen;
}

/* Override the plan's transform path from a user-supplied string.
 * On parse error the plan is left unchanged (a message is printed only
 * in verbose mode). */
EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE));

  int path[32], config[32];
  int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len);

  if (pathLen < 0) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr);
    return;
  }

  /* bestPath is indexed by remaining level; clear it, then walk the
     parsed steps down from log2len. */
  for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0;

  for(int level = p->log2len, j=0;level > 0 && j < pathLen;) {
    p->bestPath[level] = path[j];
    p->bestPathConfig[level] = config[j];
    level -= path[j];
    j++;
  }

  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Set path : ");
    for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }
}

/* Release all twiddle-factor tables of a 1-D plan. */
void freeTables(SleefDFT *p) {
  for(int N=1;N<=MAXBUTWIDTH;N++) {
    for(uint32_t level=N;level<=p->log2len;level++) {
      Sleef_free(p->tbl[N][level]);
    }
    free(p->tbl[N]);
    p->tbl[N] = NULL;
  }
}

/* Destroy a plan.  A 2-D plan recursively disposes its horizontal and
 * vertical 1-D sub-plans (which share one instance when hlen == vlen);
 * a 1-D plan frees its coefficient, permutation and table storage.
 * magic is zeroed before free() to catch double-dispose/use-after-free. */
EXPORT void SleefDFT_dispose(SleefDFT *p) {
  if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE)) {
    Sleef_free(p->tBuf);
    SleefDFT_dispose(p->instH);
    if (p->hlen != p->vlen) SleefDFT_dispose(p->instV);
    p->magic = 0;
    free(p);
    return;
  }

  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE));

  /* Trivially small transforms allocate none of the structures below. */
  if (p->log2len <= 1) {
    p->magic = 0;
    free(p);
    return;
  }

  if ((p->mode & SLEEF_MODE_REAL) != 0) {
    Sleef_free(p->rtCoef1);
    Sleef_free(p->rtCoef0);
    p->rtCoef0 = p->rtCoef1 = NULL;
  }

  for(int level = p->log2len;level >= 1;level--) {
    Sleef_free(p->perm[level]);
  }
  free(p->perm);
  p->perm = NULL;

  freeTables(p);
  p->magic = 0;
  free(p);
}

/* Branch-light integer log2 for q >= 1, via a 16-entry nibble table. */
uint32_t ilog2(uint32_t q) {
  static const uint32_t tab[] = {0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};
  uint32_t r = 0,qq;

  if (q & 0xffff0000) r = 16;
  q >>= r;
  qq = q | (q >> 1);
  qq |= (qq >> 2);
  qq = ((qq & 0x10) >> 4) | ((qq & 0x100) >> 7) | ((qq & 0x1000) >> 10);

  return r + tab[qq] * 4 + tab[q >> (tab[qq] * 4)] - 1;
}

//

/* Process-wide plan-cache state.  Guarded by planMapLock when OpenMP is
   available; otherwise callers are assumed single-threaded. */
char *dftPlanFilePath = NULL;
char *archID = NULL;
uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR;
ArrayMap *planMap = NULL;
int planFilePathSet = 0, planFileLoaded = 0;

#ifdef _OPENMP
omp_lock_t planMapLock;
int planMapLockInitialized = 0;
#endif

/* Lazily initialize planMapLock exactly once; `omp critical` serializes
   the check-and-init. */
static void initPlanMapLock() {
#ifdef _OPENMP
#pragma omp critical
  {
    if (!planMapLockInitialized) {
      planMapLockInitialized = 1;
      omp_init_lock(&planMapLock);
    }
  }
#endif
}

static void planMap_clear() {
  if (planMap != NULL) ArrayMap_dispose(planMap);
  planMap = NULL;
}

/* Configure where plans are persisted.  `path`/`arch` are copied (with
 * slack bytes); passing arch == NULL substitutes the CPU ID string.
 * SLEEF_PLAN_RESET drops any cached plan data first. */
EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) {
  initPlanMapLock();

  if ((mode & SLEEF_PLAN_RESET) != 0) {
    planMap_clear();
    planFileLoaded = 0;
    planFilePathSet = 0;
  }

  if (dftPlanFilePath != NULL) free(dftPlanFilePath);
  if (path != NULL) {
    /* NOTE(review): malloc results are used unchecked here and below --
       OOM would crash in strcpy; presumably accepted as a policy. */
    dftPlanFilePath = malloc(strlen(path)+10);
    strcpy(dftPlanFilePath, path);
  } else {
    dftPlanFilePath = NULL;
  }

  if (archID != NULL) free(archID);
  if (arch == NULL) arch = Sleef_getCpuIdString();
  archID = malloc(strlen(arch)+10);
  strcpy(archID, arch);

  planMode = mode;
  planFilePathSet = 1;
}

/* Load the plan map from disk (or the ENVVAR-named path), falling back
 * to an empty in-memory map.  Caller must hold planMapLock. */
static void loadPlanFromFile() {
  if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) {
    char *s = getenv(ENVVAR);
    if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode);
  }

  if (planMap != NULL) ArrayMap_dispose(planMap);

  if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) {
    planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0);
  }
  if (planMap == NULL) planMap = initArrayMap();

  planFileLoaded = 1;
}

/* Persist the plan map unless read-only or path-less.  Caller must hold
 * planMapLock. */
static void savePlanToFile() {
  assert(planFileLoaded);
  if ((planMode & SLEEF_PLAN_READONLY) == 0 && dftPlanFilePath != NULL) {
    ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID);
  }
}

/* Bit widths of the fields packed into 64-bit plan-map keys; the low
   CATBIT bits hold a category discriminating the key* functions below. */
#define CATBIT 8
#define BASETYPEIDBIT 2
#define LOG2LENBIT 8
#define DIRBIT 1
#define BUTSTATBIT 16

/* Key for the "measurement results exist" marker (category 0). */
static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 0;
  uint64_t k = 0;
  k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  /* NOTE(review): the mask here uses LOG2LENBIT where DIRBIT was likely
     intended; harmless since dir is normalized to 0/1 above, but worth
     confirming against keyTrans-style packing. */
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

#define LEVELBIT LOG2LENBIT
#define BUTCONFIGBIT 8
#define TRANSCONFIGBIT 8

/* Key for 2-D transpose timing entries (category 2); symmetric in
 * hlen/vlen via max/min ordering. */
static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) {
  int max = MAX(hlen, vlen), min = MIN(hlen, vlen);
  int cat = 2;
  uint64_t k = 0;
  k = (k << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT));
  k = (k << LOG2LENBIT) | (max & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << LOG2LENBIT) | (min & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

/* Key for the per-level butterfly width of a stored path (category 3). */
static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 3;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  /* NOTE(review): same DIRBIT/LOG2LENBIT mask mismatch as keyButStat. */
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

/* Key for the per-level configuration of a stored path (category 4). */
static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 4;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

/* Fetch a value from the plan map; entries are stored as hex strings.
 * Returns 0 both for "absent" and for a stored zero. */
static uint64_t planMap_getU64(uint64_t key) {
  char *s = ArrayMap_get(planMap, key);
  if (s == NULL) return 0;
  uint64_t ret;
  if (sscanf(s, "%" SCNx64, &ret) != 1) return 0;
  return ret;
}

/* Store a value as a hex string; frees any value it replaces. */
static void planMap_putU64(uint64_t key, uint64_t value) {
  char *s = malloc(100);
  sprintf(s, "%" PRIx64, value);
  s = ArrayMap_put(planMap, key, s);
  if (s != NULL) free(s);
}

/* Load a previously measured path for plan p in category pathCat.
 * Returns 1 on success, 0 if no results exist or a stored width is out
 * of range (in which case bestPath/bestPathConfig were still written). */
int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE));
  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  /* NOTE(review): uint64_t narrowed to int; fine while the marker is
     only ever 0 or 1. */
  int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10));
  if (stat == 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return 0;
  }

  int ret = 1;
  for(int j = p->log2len;j >= 0;j--) {
    p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    if (p->bestPath[j] > MAXBUTWIDTH) ret = 0;
  }

  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return ret;
}

/* Store the current best path for plan p, unless results for this
 * category already exist.  Writes the presence marker last. */
void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE));
  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return;
  }

  for(int j = p->log2len;j >= 0;j--) {
    planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]);
    planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]);
  }

  planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1);

  /* READONLY is re-checked inside savePlanToFile; this guard just skips
     the call early. */
  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}

/* Load measured transpose timings for a 2-D plan; returns nonzero iff a
 * single-threaded timing is present. */
int PlanManager_loadMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE));
  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0));
  p->tmMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1));

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return p->tmNoMT != 0;
}

/* Store measured transpose timings for a 2-D plan and persist the map. */
void PlanManager_saveMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE));
  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT);
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT );

  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
threads.c
#include <stdio.h> #include <omp.h> #include <string.h> #include <stdlib.h> int main() { //Determine which GPU type (NVIDIA or AMD) char* nvidia= "sm"; char* aomp_gpu= getenv("AOMP_GPU"); int isAMDGPU = 1; int masterWarpThread = -1; if(aomp_gpu && strstr(aomp_gpu, nvidia) != NULL) isAMDGPU = 0; int thread_id[1024] ; for (int i=0; i < 1024; i++) thread_id[i] = -1; //#pragma omp target map (tofrom: thread_id) #pragma omp target parallel for num_threads(1024) for (int i=0; i< 1024; i++) { if (i >950) printf ("Thread: %d\n", omp_get_thread_num()); thread_id[i] = omp_get_thread_num(); } // SPMD: if (thread_id[1023] == 1023 ) { int maxThrd = -1; for (int i=0; i < 1024; i++) { if (thread_id[i] > maxThrd) maxThrd = thread_id[i]; } printf("Max thread id %d\n", maxThrd); //Determine execution Mode if (maxThrd == 1023) printf("Running in SPMD Mode\n"); else printf("Running in Generic Mode\n"); //Verify Results int passed = 0; //Check SPMD results if (maxThrd == 1023) passed = 1; else{ //Check generic results if(isAMDGPU) maxThrd += 64; else maxThrd += 32; if (maxThrd == 1023) passed = 1; } if (passed){ printf("Passed!\n"); return 0; } else{ printf("Failed\n"); return 1; } }
depth_first_search.c
#include <omp.h>
#include <time.h>

#include "problem.h"
#include "depth_first_search.h"

/* Entry point: exhaustive branch-and-bound style DFS over all 0/1
 * assignments of the n graph vertices, parallelized with OpenMP tasks.
 * Returns the cheapest valid cut found; the caller owns the returned
 * solution's array.  Prints a panic message if no valid cut was found
 * (the algorithm's contract is that at least one exists). */
struct ProblemSolution run_recursive_depth_first_search(const struct ProblemInstance* instance)
{
    struct ProblemSolution best_solution = {
        .array = malloc(sizeof(int) * instance->n),
        .size = instance->n,
        .cost = 0,
        .is_valid = false,
    };

    /* Root of the search: the all-zeros assignment.  Ownership passes to
       do_recursive_depth_first_search, which frees it. */
    // would be deallocated by do_depth_first_search
    struct ProblemSolution* blank_solution = malloc(sizeof(struct ProblemSolution));
    blank_solution->array = malloc(sizeof(int) * instance->n);
    blank_solution->size = instance->n;
    blank_solution->cost = 0;
    blank_solution->is_valid = false;
    for (int i = 0; i < instance->n; i++) {
        blank_solution->array[i] = 0;
    }

    /* One thread seeds the recursion; the tasks it spawns are picked up
       by the rest of the team. */
    #pragma omp parallel
    {
        #pragma omp single
        {
            do_recursive_depth_first_search(instance, &best_solution, blank_solution, instance->a, 0);
        }
    }

    if (best_solution.is_valid == false) {
        printf("Panic! Depth First Search has faild. Algorithm is not correct.\n");
    }

    // !! we know that at least one cut is exist
    // !! that is why the below pointer dereference must be valid
    return best_solution;
}

/* Recursive worker.  At each depth it branches on including (1) or
 * excluding (0) vertex `depth`, spawning one OpenMP task per branch.
 * graph_capacity counts how many more vertices may still be included.
 *
 * Ownership: current_solution is consumed (freed) by this call on every
 * path; include/exclude copies are handed to the child tasks.
 *
 * NOTE(review): best_solution is read (is_valid/cost) outside the
 * critical section below, then written inside it -- a check-then-act
 * race between tasks.  Worst case it admits an extra validate/copy of a
 * not-actually-better solution; the final copy is serialized, but the
 * stale-read window means a slightly worse solution written later could
 * overwrite a better one.  Confirm this is acceptable for the use case. */
void do_recursive_depth_first_search
(
    const struct ProblemInstance* instance,
    struct ProblemSolution* best_solution,
    struct ProblemSolution* current_solution,
    int graph_capacity,
    int depth
)
{
    struct ProblemSolution* include_solution;
    struct ProblemSolution* exclude_solution;

    /* Leaf: capacity exhausted or all vertices assigned. */
    if (graph_capacity == 0 || depth == instance->n) {
        calculate_cut_cost(current_solution, instance);
        if (best_solution->is_valid == false || best_solution->cost > current_solution->cost) {
            // temporary allocates memory on the heap
            validate_solution(current_solution, instance);
            if (current_solution->is_valid) {
                #pragma omp critical
                {
                    best_solution->is_valid = true;
                    best_solution->cost = current_solution->cost;
                    for (int i = 0; i < instance->n; i++) {
                        best_solution->array[i] = current_solution->array[i];
                    }
                }
            }
        }
        free(current_solution->array);
        free(current_solution);
        return;
    }

    /* Branch: clone the partial assignment for both choices. */
    include_solution = malloc(sizeof(struct ProblemSolution));
    include_solution->array = malloc(sizeof(int) * instance->n);
    include_solution->size = instance->n;
    include_solution->cost = 0;
    include_solution->is_valid = false;

    exclude_solution = malloc(sizeof(struct ProblemSolution));
    exclude_solution->array = malloc(sizeof(int) * instance->n);
    exclude_solution->size = instance->n;
    exclude_solution->cost = 0;
    exclude_solution->is_valid = false;

    for (int i = 0; i < instance->n; i++) {
        include_solution->array[i] = current_solution->array[i];
        exclude_solution->array[i] = current_solution->array[i];
    }

    /* current_solution is fully copied; release it before recursing. */
    free(current_solution->array);
    free(current_solution);

    include_solution->array[depth] = 1;
    exclude_solution->array[depth] = 0;
    depth++;

    #pragma omp task
    {
        do_recursive_depth_first_search(instance, best_solution, include_solution, graph_capacity - 1, depth);
    }
    #pragma omp task
    {
        do_recursive_depth_first_search(instance, best_solution, exclude_solution, graph_capacity    , depth);
    }
}
pr68640.c
/* { dg-do compile } */ /* { dg-options "-O2 -fopenmp -fdump-tree-ealias-all" } */ #define N 1024 int foo (int *__restrict__ ap) { int *bp = ap; #pragma omp parallel for for (unsigned int idx = 0; idx < N; idx++) ap[idx] = bp[idx]; } /* { dg-final { scan-tree-dump-times "clique 1 base 1" 2 "ealias" } } */ /* { dg-final { scan-tree-dump-times "(?n)clique 1 base 0" 2 "ealias" } } */
latency_ctx.h
/* * Copyright (c) 2018 Intel Corporation. All rights reserved. * This software is available to you under the BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
 */

/* Multithreaded SHMEM put-latency benchmark using per-thread contexts.
 * Phase 1: warmup puts (untimed).  Phase 2: timed puts; the master
 * thread samples `start` after an omp barrier so all contexts exist
 * before timing begins.  `end` is taken after the parallel region and
 * the results are reduced/printed on the streaming node only.
 *
 * NOTE(review): check_once/static state in a header-defined inline
 * function gives each translation unit its own copy -- presumably this
 * header is included once per benchmark binary; confirm. */
static inline void
streaming_put_latency_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
    double start = 0.0, end = 0.0;
    unsigned long int i;
    int dest = partner_node(metric_info);
    static int check_once = 0;

    if (!check_once) {
        /* check to see whether sender and receiver are the same process */
        if (dest == metric_info->my_node) {
            fprintf(stderr, "Warning: Sender and receiver are the same "
                            "process (%d)\n", dest);
        }
        /* hostname validation for all sender and receiver processes */
        int status = check_hostname_validation(metric_info);
        if (status != 0) return;
        check_once++;
    }

    shmem_barrier_all();

    /* Warmup pass: same traffic pattern as the timed pass, not measured. */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

            for (i = 0; i < metric_info->warmup; i++) {
#ifdef USE_NONBLOCKING_API
                shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
#else
                shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                 metric_info->src + thread_id * len, len, dest);
#endif
                /* Each iteration completes before the next (latency, not
                   bandwidth). */
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    /* Timed pass. */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

            /* Start the clock only after every thread has its context. */
#pragma omp barrier
#pragma omp master
            {
                start = perf_shmemx_wtime();
            }

            for (i = 0; i < metric_info->trials; i++) {
#ifdef USE_NONBLOCKING_API
                shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
#else
                shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                 metric_info->src + thread_id * len, len, dest);
#endif
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    if (streaming_node) {
        end = perf_shmemx_wtime();
        calc_and_print_results(start, end, len, metric_info);
    }

    shmem_barrier_all();
}

/* Multithreaded SHMEM get-latency benchmark; mirrors the put variant
 * above but with shmem_ctx_getmem(_nbi).  Note the quiet placement:
 * only the non-blocking get needs an explicit shmem_ctx_quiet, since a
 * blocking get returns with the data already delivered. */
static inline void
streaming_get_latency_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
    double start = 0.0, end = 0.0;
    unsigned long int i;
    int dest = partner_node(metric_info);
    static int check_once = 0;

    if (!check_once) {
        /* check to see whether sender and receiver are the same process */
        if (dest == metric_info->my_node) {
            fprintf(stderr, "Warning: Sender and receiver are the same "
                            "process (%d)\n", dest);
        }
        /* hostname validation for all sender and receiver processes */
        int status = check_hostname_validation(metric_info);
        if (status != 0) return;
        check_once++;
    }

    shmem_barrier_all();

    /* Warmup pass. */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

            for (i = 0; i < metric_info->warmup; i++) {
#ifdef USE_NONBLOCKING_API
                shmem_ctx_getmem_nbi(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
                shmem_ctx_quiet(ctx);
#else
                shmem_ctx_getmem(ctx, metric_info->dest + thread_id * len,
                                 metric_info->src + thread_id * len, len, dest);
#endif
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    /* Timed pass. */
    if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i) \
        shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

#pragma omp barrier
#pragma omp master
            {
                start = perf_shmemx_wtime();
            }

            for (i = 0; i < metric_info->trials; i++) {
#ifdef USE_NONBLOCKING_API
                shmem_ctx_getmem_nbi(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
                shmem_ctx_quiet(ctx);
#else
                shmem_ctx_getmem(ctx, metric_info->dest + thread_id * len,
                                 metric_info->src + thread_id * len, len, dest);
#endif
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    if (streaming_node) {
        end = perf_shmemx_wtime();
        calc_and_print_results(start, end, len, metric_info);
    }

    shmem_barrier_all();
}
zbatch.c
#include "kog.h"
#include "mem.h"
#ifdef USE_MSR
#include "msr.h"
#endif /* USE_MSR */
#include "wre.h"

/* Batch driver for the vectorized 2x2 complex SVD kernel (z8svd2_).
 * Usage: zbatch n #batches infile
 * For each batch it reads the 8 component planes of n 2x2 complex
 * matrices from `infile`, runs the SVD over all vector groups in
 * parallel, reduces the error metrics by a tree max, and emits one CSV
 * row per batch.  With USE_MSR it also samples per-thread MPERF/APERF
 * to estimate the effective clock ratio and logs it to zmsr.csv.
 *
 * NOTE(review): early-exit error paths (fread failures etc.) return
 * without fclose/Zfree/Tfree -- acceptable since the process exits, but
 * worth noting.  Also "%lu" is used for uint64_t below; presumably this
 * only targets LP64 platforms -- PRIu64 would be portable. */
int main(int argc, char *argv[])
{
  if (4 != argc) {
    (void)fprintf(stderr, "%s n #batches infile\n", argv[0]);
    return EXIT_FAILURE;
  }
  const size_t n = atoz(argv[1]);
  if (!n) {
    perror("atoz(n)");
    return EXIT_FAILURE;
  }
  // TODO: FIXME for n not a power of two
  if (_mm_popcnt_u64(n) > (__int64)1) {
    perror("n not a power of two");
    return EXIT_FAILURE;
  }
  const size_t b = atoz(argv[2]);
  if (!b) {
    perror("atoz(b)");
    return EXIT_FAILURE;
  }
  FILE *const f = fopen(argv[3], "rb");
  if (!f) {
    perror("fopen");
    return EXIT_FAILURE;
  }
  Zmem *const z = Zalloc(n);
  if (!z)
    return EXIT_FAILURE;
  Tout *const t = Talloc(n);
  if (!t)
    return EXIT_FAILURE;

  /* CSV header. */
  (void)fprintf(stdout, "\"ZBATCH\",\"WTIMEs\",\"K2\",\"RE\",\"OU\",\"OV\"");
#ifdef USE_MSR
  (void)fprintf(stdout, ",\"A_M\"");
#endif /* USE_MSR */
  (void)fprintf(stdout, "\n");
  (void)fflush(stdout);

  /* Number of vector groups; each group holds 2^VLlg matrices. */
  const size_t V = n2V(n);
#ifdef USE_MSR
  const size_t mt = (size_t)omp_get_max_threads();
  FILE *const mf = fopen("zmsr.csv", "w");
  if (!mf) {
    perror("fopen(zmsr.csv)");
    return EXIT_FAILURE;
  }
  (void)fprintf(mf, "\"ZBATCH\",\"TIX\",\"MPERF\",\"APERF\"\n");
  (void)fflush(mf);
#endif /* USE_MSR */

  for (size_t j = (size_t)1u; j <= b; ++j) {
    /* Load the batch: real/imag planes of A11, A21, A12, A22. */
    if (n != fread(z->r.A11, sizeof(double), n, f)) {
      perror("fread(A11r)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->i.A11, sizeof(double), n, f)) {
      perror("fread(A11i)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->r.A21, sizeof(double), n, f)) {
      perror("fread(A21r)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->i.A21, sizeof(double), n, f)) {
      perror("fread(A21i)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->r.A12, sizeof(double), n, f)) {
      perror("fread(A12r)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->i.A12, sizeof(double), n, f)) {
      perror("fread(A12i)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->r.A22, sizeof(double), n, f)) {
      perror("fread(A22r)");
      return EXIT_FAILURE;
    }
    if (n != fread(z->i.A22, sizeof(double), n, f)) {
      perror("fread(A22i)");
      return EXIT_FAILURE;
    }

    double
#ifdef USE_MSR
      avg = 0.0,
#endif /* USE_MSR */
      w = omp_get_wtime();
#ifdef _OPENMP
#ifdef USE_MSR
#pragma omp parallel default(none) shared(j,V,z,mf) reduction(+:avg)
#else /* !USE_MSR */
#pragma omp parallel default(none) shared(V,z)
#endif /* ?USE_MSR */
    {
#endif /* _OPENMP */
#ifdef USE_MSR
      /* Snapshot MPERF/APERF before the work to compute per-thread
         deltas afterwards. */
      const int tix = omp_get_thread_num();
      const int cfd = msr_open(msr_mycpu());
      uint64_t aperf = UINT64_C(0), mperf = UINT64_C(0);
      if (cfd >= 0) {
        (void)msr_read(cfd, IA32_MPERF, &mperf);
        (void)msr_read(cfd, IA32_APERF, &aperf);
      }
#endif /* USE_MSR */
#ifdef _OPENMP
#pragma omp for
#endif /* _OPENMP */
      for (size_t i = (size_t)0u; i < V; ++i) {
        const size_t k = (i << VLlg);
        z8svd2_
          ((z->r.A11 + k), (z->i.A11 + k), (z->r.A21 + k), (z->i.A21 + k),
           (z->r.A12 + k), (z->i.A12 + k), (z->r.A22 + k), (z->i.A22 + k),
           (z->r.U11 + k), (z->i.U11 + k), (z->r.U21 + k), (z->i.U21 + k),
           (z->r.U12 + k), (z->i.U12 + k), (z->r.U22 + k), (z->i.U22 + k),
           (z->r.V11 + k), (z->i.V11 + k), (z->r.V21 + k), (z->i.V21 + k),
           (z->r.V12 + k), (z->i.V12 + k), (z->r.V22 + k), (z->i.V22 + k),
           (z->v.S1 + k), (z->v.S2 + k), (z->v.s + k));
      }
#ifdef USE_MSR
      /* Per-thread APERF/MPERF ratio; clamped to 1.0 on wrap or when
         counters are unavailable.  Summed by the reduction, averaged
         over mt below. */
      if (cfd >= 0) {
        uint64_t mval = UINT64_C(0), aval = UINT64_C(0);
        (void)msr_read(cfd, IA32_MPERF, &mval);
        (void)msr_read(cfd, IA32_APERF, &aval);
        (void)msr_close(cfd);
        mperf = ((mval > mperf) ? (mval - mperf) : UINT64_C(0));
        aperf = ((aval > aperf) ? (aval - aperf) : UINT64_C(0));
        avg = ((mperf && aperf && (mperf > aperf)) ? (((double)aperf) / mperf) : 1.0);
      }
      else
        avg = 1.0;
#pragma omp critical
      {
        (void)fprintf(mf, "%zu,%d,%lu,%lu\n", j, tix, mperf, aperf);
        (void)fflush(mf);
      }
#endif /* USE_MSR */
#ifdef _OPENMP
    }
#endif /* _OPENMP */
    w = omp_get_wtime() - w;
#ifdef USE_MSR
    avg /= mt;
#endif /* USE_MSR */

    /* Per-element error metrics into t->K2/RE/OU/OV. */
    wzre
      (n, t->K2, t->RE, t->OU, t->OV,
       z->r.A11, z->i.A11, z->r.A21, z->i.A21,
       z->r.A12, z->i.A12, z->r.A22, z->i.A22,
       z->r.U11, z->i.U11, z->r.U21, z->i.U21,
       z->r.U12, z->i.U12, z->r.U22, z->i.U22,
       z->r.V11, z->i.V11, z->r.V21, z->i.V21,
       z->r.V12, z->i.V12, z->r.V22, z->i.V22,
       z->v.S1, z->v.S2, z->v.s);

    /* Parallel tree reduction: fold the max of each metric into t->*[0].
       Requires n to be a power of two (see TODO above). */
    // TODO: FIXME for n not a power of two
    for (size_t k = n >> 1u; k; k >>= 1u) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(k,t)
#endif /* _OPENMP */
      for (size_t i = (size_t)0u; i < k; ++i) {
        (t->K2)[i] = fmaxw((t->K2)[i], (t->K2)[i + k]);
        (t->RE)[i] = fmaxw((t->RE)[i], (t->RE)[i + k]);
        (t->OU)[i] = fmaxw((t->OU)[i], (t->OU)[i + k]);
        (t->OV)[i] = fmaxw((t->OV)[i], (t->OV)[i + k]);
      }
    }

    (void)Bwre(stdout, j, w, *(t->K2), *(t->RE), *(t->OU), *(t->OV),
#ifdef USE_MSR
               &avg
#else /* !USE_MSR */
               (const double*)NULL
#endif /* ?USE_MSR */
               );
  }
#ifdef USE_MSR
  (void)fclose(mf);
#endif /* USE_MSR */
  (void)Tfree(t);
  (void)Zfree(z);
  return (fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS);
}
preprocessing.c
/** * @file preprocessing.c * @author Sylvan Brocard (sbrocard@upmem.com) * @brief Function for data preprocessing * */ #include <float.h> #include "../trees.h" void preprocessing(Params *p, float **features_float) { /* ALL OF THE FOLLOWING SHOULD REALLY BE HANDLED BY NUMPY */ // int ipoint, ifeature; // float *features_mins = (float *) malloc(p->nfeatures* sizeof(*p->features_mins)); // float *features_maxes = (float *) malloc(p->nfeatures* sizeof(*p->features_maxes)); // /* finding feature-wise minima */ // #pragma omp parallel for // for(ifeature = 0; ifeature < p->nfeatures; ifeature++) // features_mins[ifeature] = FLT_MAX; // #pragma omp parallel for collapse(2) \ // reduction(min : features_mins[:p->nfeatures]) // for (ifeature = 0; ifeature < p->nfeatures; ifeature++) // for(ipoint = 0; ipoint < p->npoints; ipoint++) // if(features_float[ipoint][ifeature] < p->features_mins[ifeature]) // features_mins[ifeature] = features_float[ipoint][ifeature]; // /* finding featre-wise maxima */ // #pragma omp parallel for // for(ifeature = 0; ifeature < p->nfeatures; ifeature++) // features_maxes[ifeature] = FLT_MIN; // #pragma omp parallel for collapse(2) \ // reduction(min : features_maxes[:p->nfeatures]) // for (ifeature = 0; ifeature < p->nfeatures; ifeature++) // for(ipoint = 0; ipoint < p->npoints; ipoint++) // if(features_float[ipoint][ifeature] < p->features_mins[ifeature]) // features_mins[ifeature] = features_float[ipoint][ifeature]; }
4538.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization: A[i][j] = (i+j)/nj, a deterministic ramp. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   NOTE(review): this prints ALL of B, including the border cells
   (row/column 0 and the last row/column) that kernel_conv2d never
   writes -- those reads are of uninitialized memory; confirm whether
   the harness tolerates that. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

/* Main computational kernel: 3x3 convolution over the interior of A
   into B.  The whole function will be timed, including the call and
   return.
   NOTE(review): the `target teams distribute` directive sits on the
   inner j-loop inside the sequential outer i-loop, so (with offloading
   enabled) every row appears to trigger its own target region --
   presumably intentional for this benchmark variant; confirm against
   the suite's other versions. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      #pragma omp target teams distribute thread_limit(256)
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

/* Driver: init, time the kernel, print timing, and keep B live so the
   compiler cannot dead-code-eliminate the kernel. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
heat.c
#include <omp.h> /* The MIT License (MIT) Copyright (c) 2015 Alexander Vondrous Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # The Heat Chindogu The heat chindogu is application for your laptop to convert electric energy into heat, which is especially usefull in the winter season during commute in a train to warm your fingers. Warning: This application will drain your battery. This heat chindogu follows the ten chindogu rules 1. A Chindogu cannot be for real use 2. A Chindogu must exist 3. Inherent in every Chindogu is the spirit of anarchy 4. Chindogus are tools for everyday life 5. Chindogu are not for sale 6. Humour must not be the sole reason for creating a Chindogu 7. Chindogu is not propaganda 8. Chindogu are never taboo 9. Chindogus cannot be patented 10. Chindogu are without prejudice Programming information - OpenMP is necessary - The only output is heat - No user interaction is necessary. - This programm seems not to have a memory leak. 
- The functional test is not integrated on Jenkins or what so ever because you need a laptop and a heat sensor. - A feature would be to also use your GPU to produce even more heat. */ #include <omp.h> int main (int argc, char *argv[]) { int i; #pragma omp parallel { while (1) { i++; } } return 0; }
lazy_matrix.h
#pragma once

#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <typeinfo>
#include <utility>
#include <vector>

using sz_t = std::size_t;

/// 2-D storage helper: a vector of row vectors.
template <typename T>
using TDVec = std::vector<std::vector<T>>;

/// 2-D brace-initializer helper: a list of row lists.
template <typename T>
using List = std::initializer_list<std::initializer_list<T>>;

namespace policy {

/**
 * @brief Access policy that stores a matrix row by row: element (i, j) of an
 *        n x m matrix lives at linear index i * m + j.
 */
class row_major {
public:
  template <typename R1>
  decltype(auto) operator()(const sz_t &i, const sz_t &j, const R1 &a) const {
    return a._array[i * a.size_y + j];
  }

  template <typename R1>
  decltype(auto) operator()(const sz_t &i, const sz_t &j, R1 &a) {
    return a._array[i * a.size_y + j];
  }
};

/**
 * @brief Access policy that stores a matrix column by column: element (i, j)
 *        of an n x m matrix lives at linear index j * n + i.
 */
class column_major {
public:
  template <typename R1>
  decltype(auto) operator()(const sz_t &i, const sz_t &j, const R1 &a) const {
    return a._array[j * a.size_x + i];
  }

  template <typename R1>
  decltype(auto) operator()(const sz_t &i, const sz_t &j, R1 &a) {
    return a._array[j * a.size_x + i];
  }
};

}; // namespace policy

/// Element-wise addition: result(i, j) = op1(i, j) + op2(i, j).
struct _add {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) + op2(i, j);
  }
};

/// Element-wise subtraction: result(i, j) = op1(i, j) - op2(i, j).
struct _sub {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) - op2(i, j);
  }
};

/// Element-wise division: result(i, j) = op1(i, j) / op2(i, j).
struct _ediv {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) / op2(i, j);
  }
};

/// Division by a scalar: result(i, j) = op1(i, j) / op2 (op2 is a scalar).
struct _sdiv {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) / op2;
  }
};

/// Element-wise (Hadamard) multiplication: result(i, j) = op1(i, j) * op2(i, j).
struct _emul {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) * op2(i, j);
  }
};

/// Multiplication by a scalar: result(i, j) = op1(i, j) * op2 (op2 is a scalar).
struct _smul {
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return op1(i, j) * op2;
  }
};

/**
 * @brief Functor producing the (i, j)-th element of the standard matrix
 *        product: sum over k of op1(i, k) * op2(k, j).
 *
 * BUG FIX(review): the original spawned OpenMP tasks inside the recursion
 * using `#pragma omp parallel single` (not a valid combined construct),
 * attached bare declarations as `task` structured blocks, and `return`ed out
 * of a parallel region — none of which compiles. A plain accumulation loop
 * computes the identical value.
 */
struct _std_mul {
  /// Entry point used by expression evaluation: k runs over the inner
  /// dimension, i.e. op2.shape().first rows of the right operand.
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &i,
                            const sz_t &j) const {
    return (*this)(op1, op2, op2.shape().first - 1, i, j);
  }

  /// Accumulate op1(i, t) * op2(t, j) for t in [0, k].
  template <typename R1, typename R2>
  decltype(auto) operator()(const R1 &op1, const R2 &op2, const sz_t &k,
                            const sz_t &i, const sz_t &j) const {
    auto acc = op1(i, 0) * op2(0, j);
    for (sz_t t = 1; t <= k; ++t) {
      acc += op1(i, t) * op2(t, j);
    }
    return acc;
  }
};

/**
 * @brief Lazily evaluated expression node; evaluation happens element by
 *        element through operator().
 *
 * NOTE(review): operands are held by const reference, so an expr must not
 * outlive the matrices/expressions it was built from (binding temporaries
 * dangles). This matches the original design.
 *
 * @tparam R1 Left operand type (matrix or nested expression)
 * @tparam R2 Right operand type (matrix, expression, or scalar)
 * @tparam Op Functor computing one element of the result
 */
template <typename R1, typename R2, typename Op>
class expr {
private:
  const R1 &op1;     // left operand
  const R2 &op2;     // right operand
  const sz_t size_x; // rows of the result
  const sz_t size_y; // columns of the result
  Op op;             // element evaluation functor

public:
  /**
   * @brief Constructs the expression node.
   *
   * @param[in] a First operand
   * @param[in] b Second operand
   * @param[in] f Functor for the operator
   */
  expr(const R1 &a, const R2 &b, Op f)
      : op1(a), op2(b), size_x(a.shape().first), size_y(b.shape().second),
        op(f) {}

  /**
   * @brief Dimensions of the resulting expression.
   *
   * @return pair {rows, columns}
   */
  decltype(auto) shape() const { return std::make_pair(size_x, size_y); }

  /**
   * @brief Stream the evaluated expression, one row per line.
   *
   * BUG FIX(review): the original wrapped this in an OpenMP parallel-for;
   * concurrent writes to one stream race and scramble the row order, so the
   * loops are evaluated serially.
   */
  friend std::ostream &operator<<(std::ostream &out, expr<R1, R2, Op> &other) {
    sz_t rows = other.shape().first;
    sz_t cols = other.shape().second;
    for (sz_t i = 0; i < rows; i++) {
      for (sz_t j = 0; j < cols; j++) {
        out << other(i, j) << ' ';
      }
      out << std::endl;
    }
    return out;
  }

  /// Standard matrix addition.
  template <typename F>
  decltype(auto) operator+(const F &other) {
    assert(shape() == other.shape());
    return expr<expr<R1, R2, Op>, F, _add>(*this, other, _add());
  }

  /// Standard matrix subtraction.
  template <typename F>
  decltype(auto) operator-(const F &other) {
    assert(shape() == other.shape());
    return expr<expr<R1, R2, Op>, F, _sub>(*this, other, _sub());
  }

  /// Element-wise division.
  template <typename F>
  decltype(auto) operator/(const F &other) {
    assert(shape() == other.shape());
    return expr<expr<R1, R2, Op>, F, _ediv>(*this, other, _ediv());
  }

  /// Element-wise multiplication.
  template <typename F>
  decltype(auto) operator*(const F &other) {
    assert(shape() == other.shape());
    return expr<expr<R1, R2, Op>, F, _emul>(*this, other, _emul());
  }

  /**
   * @brief Standard matrix multiplication.
   *
   * BUG FIX(review): inner dimensions must agree, i.e. our column count must
   * equal the other operand's *row* count; the original asserted against
   * other.shape().second (its column count), inconsistent with
   * lazy_matrix::operator%.
   */
  template <typename F>
  decltype(auto) operator%(const F &other) {
    assert(size_y == other.shape().first);
    return expr<expr<R1, R2, Op>, F, _std_mul>(*this, other, _std_mul());
  }

  /// Evaluate the (i, j)-th element of the expression.
  decltype(auto) operator()(const sz_t &i, const sz_t &j) const {
    return op(op1, op2, i, j);
  }
};

/**
 * @brief Dense matrix with lazily evaluated arithmetic.
 *
 * @tparam T    Element type
 * @tparam ploy Storage/access policy: policy::row_major (default) or
 *              policy::column_major
 */
template <typename T, typename ploy = policy::row_major>
class lazy_matrix {
private:
  std::vector<T> _array; // flat element storage, laid out per `ploy`
  const sz_t size_x;     // number of rows
  const sz_t size_y;     // number of columns
  friend ploy;           // policies index _array directly
  ploy pol;

public:
  /// Empty 0 x 0 matrix.
  lazy_matrix() : size_x(0), size_y(0) {}

  /**
   * @brief Value-initialized n x m matrix.
   *
   * @param[in] n Number of rows
   * @param[in] m Number of columns
   */
  lazy_matrix(const std::size_t &n, const std::size_t &m)
      : _array(n * m), size_x(n), size_y(m) {}

  /**
   * @brief n x m matrix filled with `val`.
   *
   * @param[in] n   Number of rows
   * @param[in] m   Number of columns
   * @param[in] val The initial value of every element
   */
  lazy_matrix(const std::size_t &n, const std::size_t &m, const T &val)
      : _array(n * m, val), size_x(n), size_y(m) {}

  /**
   * @brief Initialization from a 2-D vector (rows of equal length assumed —
   *        TODO confirm callers guarantee this, as the original did not check).
   *
   * @param[in] vec 2-D vector input
   */
  lazy_matrix(const TDVec<T> &vec)
      : size_x(vec.size()), size_y((*vec.begin()).size()) {
    if (typeid(ploy) == typeid(policy::row_major)) {
      for (auto &row : vec) {
        _array.insert(_array.end(), row.begin(), row.end());
      }
    } else {
      for (sz_t j = 0; j < size_y; j++) {
        for (sz_t i = 0; i < size_x; i++) {
          _array.push_back(vec[i][j]);
        }
      }
    }
  }

  /**
   * @brief Initialization from a 2-D brace list, e.g. {{1, 2}, {3, 4}}.
   *
   * @param[in] l 2-D initializer_list input
   */
  lazy_matrix(const List<T> &l)
      : size_x(l.size()), size_y((*l.begin()).size()) {
    if (typeid(ploy) == typeid(policy::row_major)) {
      for (auto &row : l) {
        _array.insert(_array.end(), row.begin(), row.end());
      }
    } else {
      for (sz_t j = 0; j < size_y; j++) {
        for (sz_t i = 0; i < size_x; i++) {
          _array.push_back(*((*(l.begin() + i)).begin() + j));
        }
      }
    }
  }

  /**
   * @brief Materialize an expression into a concrete matrix.
   *
   * @param[in] exp The expression to evaluate
   *
   * @tparam R1 Left operand of the expression
   * @tparam R2 Right operand of the expression
   * @tparam R3 Functor performing the arithmetic operation
   */
  template <typename R1, typename R2, typename R3>
  lazy_matrix(const expr<R1, R2, R3> &exp)
      : size_x(exp.shape().first), size_y(exp.shape().second) {
    if (typeid(ploy) == typeid(policy::row_major)) {
      for (sz_t i = 0; i < size_x; i++) {
        for (sz_t j = 0; j < size_y; j++) {
          _array.push_back(exp(i, j));
        }
      }
    } else {
      for (sz_t j = 0; j < size_y; j++) {
        for (sz_t i = 0; i < size_x; i++) {
          _array.push_back(exp(i, j));
        }
      }
    }
  }

  /**
   * @brief Dimensions of the matrix.
   *
   * @return pair {rows, columns}
   */
  decltype(auto) shape() const { return std::make_pair(size_x, size_y); }

  /**
   * @brief Copy of the flat storage as laid out by the policy
   *        (row_major or column_major).
   */
  decltype(auto) pcd_data() const { return _array; }

  /// Read access to the (i, j)-th element.
  inline const T operator()(const std::size_t i, const std::size_t j) const {
    return pol(i, j, *this);
  }

  /// Mutable access to the (i, j)-th element.
  inline T &operator()(const std::size_t i, const std::size_t j) {
    return pol(i, j, *this);
  }

  /**
   * @brief Stream the matrix, one row per line.
   *
   * BUG FIX(review): serialized for the same stream-race reason as
   * expr::operator<<.
   */
  friend std::ostream &operator<<(std::ostream &out,
                                  lazy_matrix<T, ploy> &other) {
    sz_t rows = other.shape().first;
    sz_t cols = other.shape().second;
    for (sz_t i = 0; i < rows; i++) {
      for (sz_t j = 0; j < cols; j++) {
        out << other(i, j) << ' ';
      }
      out << std::endl;
    }
    return out;
  }

  /**
   * @brief Assign from a matrix or expression of matching shape.
   *
   * Evaluates into a temporary first so that self-referencing expressions
   * (e.g. *this = *this + other) read consistent values.
   *
   * @param[in] other The matrix or expression to assign from
   *
   * @tparam R1 Expression type or matrix type
   */
  template <typename R1>
  lazy_matrix operator=(const R1 &other) {
    assert(shape() == other.shape());
    lazy_matrix<T, ploy> temp(size_x, size_y);
#pragma omp parallel for
    for (sz_t i = 0; i < size_x; i++) {
      for (sz_t j = 0; j < size_y; j++) {
        temp(i, j) = other(i, j);
      }
    }
#pragma omp parallel for
    for (sz_t i = 0; i < size_x; i++) {
      for (sz_t j = 0; j < size_y; j++) {
        (*this)(i, j) = temp(i, j);
      }
    }
    return *this;
  }

  /**
   * @brief Equality with another matrix or expression.
   *
   * BUG FIX(review): the original combined the shape and element-type checks
   * with `&&`, so a pure shape mismatch fell through to the element loop and
   * read out of bounds; either mismatch alone must yield false.
   * (typeid does not evaluate its non-polymorphic operand, so other(0, 0)
   * is never actually called here.)
   *
   * @tparam R1 Matrix or expression type
   * @return true if shapes, element types and all elements match
   */
  template <typename R1>
  bool operator==(const R1 &other) {
    if (shape() != other.shape() || typeid(T) != typeid(other(0, 0))) {
      return false;
    }
    for (sz_t i = 0; i < size_x; i++) {
      for (sz_t j = 0; j < size_y; j++) {
        if ((*this)(i, j) != other(i, j)) {
          return false;
        }
      }
    }
    return true;
  }

  /// Standard matrix addition (lazy).
  template <typename R1>
  decltype(auto) operator+(const R1 &other) {
    assert(shape() == other.shape());
    return expr<lazy_matrix<T, ploy>, R1, _add>(*this, other, _add());
  }

  /// In-place addition.
  template <typename R1>
  void operator+=(const R1 &other) {
    *this = *this + other;
  }

  /// Standard matrix subtraction (lazy).
  template <typename R1>
  decltype(auto) operator-(const R1 &other) {
    assert(shape() == other.shape());
    return expr<lazy_matrix<T, ploy>, R1, _sub>(*this, other, _sub());
  }

  /// In-place subtraction.
  template <typename R1>
  void operator-=(const R1 &other) {
    *this = *this - other;
  }

  /// Element-wise division (lazy).
  template <typename R1>
  decltype(auto) operator/(const R1 &other) {
    assert(shape() == other.shape());
    return expr<lazy_matrix<T, ploy>, R1, _ediv>(*this, other, _ediv());
  }

  /// In-place element-wise division.
  template <typename R1>
  void operator/=(const R1 &other) {
    *this = *this / other;
  }

  /// Element-wise multiplication (lazy).
  template <typename R1>
  decltype(auto) operator*(const R1 &other) {
    assert(shape() == other.shape());
    return expr<lazy_matrix<T, ploy>, R1, _emul>(*this, other, _emul());
  }

  /// In-place element-wise multiplication.
  template <typename R1>
  void operator*=(const R1 &other) {
    auto temp = (*this) * other;
    *this = temp;
  }

  /// Standard matrix multiplication (lazy); inner dimensions must agree.
  template <typename R1>
  decltype(auto) operator%(const R1 &other) {
    assert(shape().second == other.shape().first);
    return expr<lazy_matrix<T, ploy>, R1, _std_mul>(*this, other, _std_mul());
  }

  /// In-place standard matrix multiplication.
  template <typename R1>
  void operator%=(const R1 &other) {
    auto temp = (*this) % other;
    *this = temp;
  }
};
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint16) // C=scalar+B GB (_bind1st__bget_uint16) // C=scalar+B' GB (_bind1st_tran__bget_uint16) // C=A+scalar GB (_bind2nd__bget_uint16) // C=A'+scalar GB (_bind2nd_tran__bget_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bget_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \ } GrB_Info GB (_bind1st_tran__bget_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Launcher.h
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0

/**
 * @file Launcher.h
 * @brief Backend-dependent abstraction for parallel function execution
 * @author Andrei Gheata (andrei.gheata@cern.ch)
 */

#ifndef COPCORE_LAUNCHER_H_
#define COPCORE_LAUNCHER_H_

#include <CopCore/Global.h>

namespace copcore {

#ifdef COPCORE_CUDA_COMPILER
namespace kernel_launcher_impl {

/// Trampoline kernel: calls device_func_select(id, args...) once for every
/// id in [0, data_size). Kernel arguments are taken by value (const Args...),
/// so each launch gets its own copy.
template <class Function, class... Args>
__global__ void kernel_dispatch(int data_size, Function device_func_select, const Args... args)
{
  // Initiate a grid-stride loop to maximize reuse of threads and CPU compatibility, keeping addressing within warps
  // unit-stride; any grid size covers all data_size elements.
  for (auto id = blockIdx.x * blockDim.x + threadIdx.x; id < data_size; id += blockDim.x * gridDim.x) {
    device_func_select(id, args...);
  }
}

} // End namespace kernel_launcher_impl
#endif

/// Common state shared by every launcher specialization: the target device id
/// and the backend-specific stream that work is submitted to.
template <BackendType backend>
class LauncherBase {
public:
  using LaunchGrid_t = launch_grid<backend>;
  using Stream_t = typename copcore::StreamType<backend>::value_type;

protected:
  int fDeviceId{0}; ///< device id (GPU for CUDA, socket for CPU)
  // LaunchGrid_t fGrid{{0}, {0}}; ///< launch grid to be used if set by user
  Stream_t fStream{0}; ///< stream id (for CUDA; not used for CPU)

public:
  /// Construct a launcher bound to the given stream.
  LauncherBase(Stream_t stream) : fStream{stream} {}

  // void SetDevice(int device) { fDeviceId = device; }
  int GetDevice() const { return fDeviceId; }

  void SetStream(Stream_t stream) { fStream = stream; }
  Stream_t GetStream() const { return fStream; }
}; // end class LauncherBase

/** @brief Launcher for generic backend */
template <BackendType backend>
class Launcher : protected LauncherBase<backend> {
public:
  using LaunchGrid_t = launch_grid<backend>;

  /** @brief Generic backend launch method. Implementation done in specializations;
   *  this fallback throws for any backend without one and returns 1 (unreachable
   *  if COPCORE_EXCEPTION throws). */
  template <class FunctionPtr, class... Args>
  int Run(FunctionPtr, int, LaunchGrid_t, const Args &...) const
  {
    // Not implemented backend launches will end-up here
    std::string backend_name(copcore::BackendName(backend));
    COPCORE_EXCEPTION("Launcher::Launch: No implementation available for " + backend_name);
    return 1;
  }
};

#ifdef COPCORE_CUDA_COMPILER
/** @brief Specialization of Launcher for the CUDA backend */
template <>
class Launcher<BackendType::CUDA> : public LauncherBase<BackendType::CUDA> {
private:
  int fNumSMs; ///< number of streaming multi-processors, queried at construction

public:
  /// Binds to the current CUDA device and caches its SM count for grid sizing.
  Launcher(Stream_t stream = 0) : LauncherBase(stream)
  {
    cudaGetDevice(&fDeviceId);
    cudaDeviceGetAttribute(&fNumSMs, cudaDevAttrMultiProcessorCount, fDeviceId);
  }

  /// Launch `func` over n_elements ids on this launcher's stream.
  /// Asynchronous: returns after the launch; use WaitStream()/WaitDevice()
  /// to synchronize. Returns 0 (also when n_elements == 0, launching nothing).
  /// A grid with grid[1].x == 0 means "auto-configure" (see below); otherwise
  /// the caller-supplied grid is used verbatim.
  template <class DeviceFunctionPtr, class... Args>
  int Run(DeviceFunctionPtr func, int n_elements, LaunchGrid_t grid, const Args &... args) const
  {
    constexpr unsigned int warpsPerSM = 32; // we should target a reasonable occupancy
    constexpr unsigned int block_size = 256;

    // Compute the launch grid.
    if (!n_elements) return 0;

    // Adjust automatically the execution grid. Optimal occupancy:
    // nMaxThreads = fNumSMs * warpsPerSM * 32; grid_size = nmaxthreads / block_size
    // if n_elements < nMaxThreads we reduce the grid size to minimize null threads
    // (the grid-stride loop in kernel_dispatch tolerates any grid size).
    LaunchGrid_t exec_grid{grid};
    if (grid[1].x == 0) {
      unsigned int grid_size = std::min(warpsPerSM * fNumSMs * 32 / block_size, (n_elements + block_size - 1) / block_size);
      exec_grid[0].x = grid_size;
      exec_grid[1].x = block_size;
    }
    //std::cout << "grid_size = " << exec_grid[0].x << " block_size = " << exec_grid[1].x << std::endl;

    // launch the kernel; only launch errors are caught here, not kernel
    // runtime errors (those surface on the next synchronization).
    kernel_launcher_impl::kernel_dispatch<<<exec_grid[0], exec_grid[1], 0, fStream>>>(n_elements, func, args...);
    COPCORE_CUDA_CHECK(cudaGetLastError());
    return 0;
  }

  /// Block until work on this launcher's stream completes; with the default
  /// stream (0) this synchronizes the whole device.
  void WaitStream() const
  {
    if (fStream)
      COPCORE_CUDA_CHECK(cudaStreamSynchronize(fStream));
    else
      COPCORE_CUDA_CHECK(cudaDeviceSynchronize());
  }

  /// Block until all work on the device completes.
  static void WaitDevice() { COPCORE_CUDA_CHECK(cudaDeviceSynchronize()); }

}; // End class Launcher<BackendType::CUDA>
#endif

/** @brief Specialization of Launcher for the CPU backend */
template <>
class Launcher<BackendType::CPU> : public LauncherBase<BackendType::CPU> {
public:
  Launcher(Stream_t stream = 0) : LauncherBase(stream) {}

  /// Run func(i, args...) for i in [0, n_elements) via OpenMP; the launch
  /// grid is ignored on CPU. Synchronous: all iterations complete before
  /// returning, which is why WaitStream/WaitDevice below are no-ops.
  template <class HostFunctionPtr, class... Args>
  int Run(HostFunctionPtr func, int n_elements, LaunchGrid_t /*grid*/, const Args &... args) const
  {
#pragma omp parallel for
    for (int i = 0; i < n_elements; ++i) {
      func(i, args...);
    }
    return 0;
  }

  /// No-op: CPU Run is synchronous.
  void WaitStream() const {}
  /// No-op: CPU Run is synchronous.
  static void WaitDevice() {}
}; // End class Launcher<BackendType::CPU>

} // End namespace copcore

#endif // COPCORE_LAUNCHER_H_
IonisationBox.c
// Re-write of find_HII_bubbles.c for being accessible within the MCMC int INIT_ERFC_INTERPOLATION = 1; int INIT_RECOMBINATIONS = 1; double *ERFC_VALS, *ERFC_VALS_DIFF; float absolute_delta_z; float overdense_small_min, overdense_small_bin_width, overdense_small_bin_width_inv; float overdense_large_min, overdense_large_bin_width, overdense_large_bin_width_inv; float prev_overdense_small_min, prev_overdense_small_bin_width, prev_overdense_small_bin_width_inv; float prev_overdense_large_min, prev_overdense_large_bin_width, prev_overdense_large_bin_width_inv; float log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, log10Mturn_bin_width_inv; float log10Mturn_min_MINI, log10Mturn_max_MINI, log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI; int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI); void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density); int ComputeIonizedBox(float redshift, float prev_redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options, struct PerturbedField *perturbed_field, struct PerturbedField *previous_perturbed_field, struct IonizedBox *previous_ionize_box, struct TsBox *spin_temp, struct PerturbHaloField *halos, struct IonizedBox *box) { int status; Try{ // This Try brackets the whole function, so we don't indent. 
LOG_DEBUG("input values:"); LOG_DEBUG("redshift=%f, prev_redshift=%f", redshift, prev_redshift); #if LOG_LEVEL >= DEBUG_LEVEL writeUserParams(user_params); writeCosmoParams(cosmo_params); writeAstroParams(flag_options, astro_params); writeFlagOptions(flag_options); #endif // Makes the parameter structs visible to a variety of functions/macros // Do each time to avoid Python garbage collection issues Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); omp_set_num_threads(user_params->N_THREADS); // Other parameters used in the code int i,j,k,x,y,z, LAST_FILTER_STEP, first_step_R, short_completely_ionised,i_halo; int counter, N_halos_in_cell; unsigned long long ct; float growth_factor, pixel_mass, cell_length_factor, M_MIN, prev_growth_factor; float erfc_denom, erfc_denom_cell, res_xH, Splined_Fcoll, xHII_from_xrays, curr_dens, massofscaleR, ION_EFF_FACTOR, growth_factor_dz; float Splined_Fcoll_MINI, prev_dens, ION_EFF_FACTOR_MINI, prev_Splined_Fcoll, prev_Splined_Fcoll_MINI; float ave_M_coll_cell, ave_N_min_cell, pixel_volume, density_over_mean; double global_xH, ST_over_PS, f_coll, R, stored_R, f_coll_min; double ST_over_PS_MINI, f_coll_MINI, f_coll_min_MINI; double t_ast, Gamma_R_prefactor, rec, dNrec, sigmaMmax; double Gamma_R_prefactor_MINI; float fabs_dtdz, ZSTEP, z_eff; const float dz = 0.01; float dens_val, prev_dens_val; int overdense_int,status_int; int something_finite_or_infinite = 0; int log10_Mturnover_MINI_int, log10_Mturnover_int; int *overdense_int_boundexceeded_threaded = calloc(user_params->N_THREADS,sizeof(int)); if(user_params->USE_INTERPOLATION_TABLES) { overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999; overdense_large_bin_width = 1./((double)NSFR_high-1.)*(Deltac-overdense_large_min); overdense_large_bin_width_inv = 1./overdense_large_bin_width; prev_overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999; prev_overdense_large_bin_width = 
1./((double)NSFR_high-1.)*(Deltac-prev_overdense_large_min); prev_overdense_large_bin_width_inv = 1./prev_overdense_large_bin_width; } double ave_log10_Mturnover, ave_log10_Mturnover_MINI; float Mlim_Fstar, Mlim_Fesc; float Mlim_Fstar_MINI, Mlim_Fesc_MINI; float Mcrit_atom, log10_Mcrit_atom, log10_Mcrit_mol; fftwf_complex *log10_Mturnover_unfiltered=NULL, *log10_Mturnover_filtered=NULL; fftwf_complex *log10_Mturnover_MINI_unfiltered=NULL, *log10_Mturnover_MINI_filtered=NULL; float log10_Mturnover, log10_Mturnover_MINI, Mcrit_LW, Mcrit_RE, Mturnover, Mturnover_MINI; float min_density, max_density; float prev_min_density, prev_max_density; float stored_redshift, adjustment_factor; gsl_rng * r[user_params->N_THREADS]; LOG_SUPER_DEBUG("initing heat"); init_heat(); float TK; TK = T_RECFAST(redshift,0); LOG_SUPER_DEBUG("inited heat"); init_ps(); LOG_SUPER_DEBUG("defined parameters"); pixel_volume = pow(user_params->BOX_LEN/((float)(user_params->HII_DIM)), 3); if(flag_options->USE_MASS_DEPENDENT_ZETA) { ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10; ION_EFF_FACTOR_MINI = global_params.Pop3_ion * astro_params->F_STAR7_MINI * astro_params->F_ESC7_MINI; } else { ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR; ION_EFF_FACTOR_MINI = 0.; } // For recombinations if(flag_options->INHOMO_RECO) { if(INIT_RECOMBINATIONS) { init_MHR(); INIT_RECOMBINATIONS=0; } if (prev_redshift < 1) //deal with first redshift ZSTEP = (1. 
+ redshift) * (global_params.ZPRIME_STEP_FACTOR - 1.); else ZSTEP = prev_redshift - redshift; #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) { box->Gamma12_box[ct] = 0.0; box->MFP_box[ct] = 0.0; } } } else { ZSTEP = 0.2; } #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) { box->z_re_box[ct] = -1.0; } } fabs_dtdz = fabs(dtdz(redshift))/1e15; //reduce to have good precision t_ast = astro_params->t_STAR * t_hubble(redshift); growth_factor_dz = dicke(redshift-dz); // Modify the current sampled redshift to a redshift which matches the expected filling factor given our astrophysical parameterisation. // This is the photon non-conservation correction if(flag_options->PHOTON_CONS) { adjust_redshifts_for_photoncons(astro_params,flag_options,&redshift,&stored_redshift,&absolute_delta_z); LOG_DEBUG("PhotonCons data:"); LOG_DEBUG("original redshift=%f, updated redshift=%f delta-z = %f", stored_redshift, redshift, absolute_delta_z); if(isfinite(redshift)==0 || isfinite(absolute_delta_z)==0) { LOG_ERROR("Updated photon non-conservation redshift is either infinite or NaN!"); Throw(ParameterError); } } Splined_Fcoll = 0.; Splined_Fcoll_MINI = 0.; double ArgBinWidth, InvArgBinWidth, erfc_arg_val, erfc_arg_min, erfc_arg_max; int erfc_arg_val_index, ERFC_NUM_POINTS; erfc_arg_val = 0.; erfc_arg_val_index = 0; // Setup an interpolation table for the error function, helpful for calcluating the collapsed fraction // (only for the default model, i.e. 
mass-independent ionising efficiency) erfc_arg_min = -15.0; erfc_arg_max = 15.0; ERFC_NUM_POINTS = 10000; ArgBinWidth = (erfc_arg_max - erfc_arg_min)/((double)ERFC_NUM_POINTS - 1.); InvArgBinWidth = 1./ArgBinWidth; if(!flag_options->USE_MASS_DEPENDENT_ZETA && INIT_ERFC_INTERPOLATION) { ERFC_VALS = calloc(ERFC_NUM_POINTS,sizeof(double)); ERFC_VALS_DIFF = calloc(ERFC_NUM_POINTS,sizeof(double)); #pragma omp parallel shared(ERFC_VALS,erfc_arg_min,ArgBinWidth) private(i,erfc_arg_val) num_threads(user_params->N_THREADS) { #pragma omp for for(i=0;i<ERFC_NUM_POINTS;i++) { erfc_arg_val = erfc_arg_min + ArgBinWidth*(double)i; ERFC_VALS[i] = splined_erfc(erfc_arg_val); } } #pragma omp parallel shared(ERFC_VALS_DIFF,ERFC_VALS) private(i) num_threads(user_params->N_THREADS) { #pragma omp for for(i=0;i<(ERFC_NUM_POINTS-1);i++) { ERFC_VALS_DIFF[i] = ERFC_VALS[i+1] - ERFC_VALS[i]; } } INIT_ERFC_INTERPOLATION = 0; } LOG_SUPER_DEBUG("erfc interpolation done"); ///////////////////////////////// BEGIN INITIALIZATION ////////////////////////////////// // perform a very rudimentary check to see if we are underresolved and not using the linear approx if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){ LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n It Is recommended \ that you either increase the resolution (DIM/Box_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n"); } // initialize power spectrum growth_factor = dicke(redshift); prev_growth_factor = dicke(prev_redshift); fftwf_complex *deltax_unfiltered, *deltax_unfiltered_original, *deltax_filtered; fftwf_complex *xe_unfiltered, *xe_filtered, *N_rec_unfiltered, *N_rec_filtered; fftwf_complex *prev_deltax_unfiltered, *prev_deltax_filtered; fftwf_complex *M_coll_unfiltered,*M_coll_filtered; deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); deltax_unfiltered_original = (fftwf_complex *) 
fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if (flag_options->USE_MINI_HALOS){ prev_deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); prev_deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if(flag_options->USE_TS_FLUCT) { xe_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); xe_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if (flag_options->INHOMO_RECO){ N_rec_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); // cumulative number of recombinations N_rec_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if(flag_options->USE_MASS_DEPENDENT_ZETA) { xi_SFR = calloc(NGL_SFR+1,sizeof(float)); wi_SFR = calloc(NGL_SFR+1,sizeof(float)); if(user_params->USE_INTERPOLATION_TABLES) { log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double)); Overdense_spline_SFR = calloc(NSFR_high,sizeof(float)); log10_Nion_spline = calloc(NSFR_low,sizeof(float)); Nion_spline = calloc(NSFR_high,sizeof(float)); if (flag_options->USE_MINI_HALOS){ prev_log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double)); prev_Overdense_spline_SFR = calloc(NSFR_high,sizeof(float)); log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float)); Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float)); log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float)); Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float)); prev_log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float)); prev_Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float)); prev_log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float)); prev_Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float)); } } if (flag_options->USE_MINI_HALOS){ Mturns = calloc(NMTURN,sizeof(float)); 
Mturns_MINI = calloc(NMTURN,sizeof(float)); } } // Calculate the density field for this redshift if the initial conditions/cosmology are changing if(flag_options->PHOTON_CONS) { adjustment_factor = dicke(redshift)/dicke(stored_redshift); } else { adjustment_factor = 1.; } #pragma omp parallel shared(deltax_unfiltered,perturbed_field,adjustment_factor) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = (perturbed_field->density[HII_R_INDEX(i,j,k)])*adjustment_factor; } } } } LOG_SUPER_DEBUG("density field calculated"); // keep the unfiltered density field in an array, to save it for later memcpy(deltax_unfiltered_original, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); i=0; // Newer setup to be performed in parallel int thread_num; for(thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){ // Original defaults with gsl_rng_mt19937 and SEED = 0, thus start with this and iterate for all other threads by their thread number r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937); gsl_rng_set(r[thread_num], thread_num); } pixel_mass = RtoM(L_FACTOR*user_params->BOX_LEN/(float)(user_params->HII_DIM)); cell_length_factor = L_FACTOR; if(flag_options->USE_HALO_FIELD && (global_params.FIND_BUBBLE_ALGORITHM == 2) && ((user_params->BOX_LEN/(float)(user_params->HII_DIM) < 1))) { cell_length_factor = 1.; } if (prev_redshift < 1){ LOG_DEBUG("first redshift, do some initialization"); previous_ionize_box->z_re_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); #pragma omp parallel shared(previous_ionize_box) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ previous_ionize_box->z_re_box[HII_R_INDEX(i, j, k)] = -1.0; } } } } 
if (flag_options->INHOMO_RECO) previous_ionize_box->dNrec_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); } //set the minimum source mass if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ ave_log10_Mturnover = 0.; ave_log10_Mturnover_MINI = 0.; // this is the first z, and the previous_ionize_box are empty if (prev_redshift < 1){ previous_ionize_box->Gamma12_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); // really painful to get the length... counter = 1; R=fmax(global_params.R_BUBBLE_MIN, (cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM)); while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) <= FRACT_FLOAT_ERR ){ if(R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) { stored_R = R/(global_params.DELTA_R_HII_FACTOR); } R*= global_params.DELTA_R_HII_FACTOR; counter += 1; } previous_ionize_box->Fcoll = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float)); previous_ionize_box->Fcoll_MINI = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float)); previous_ionize_box->mean_f_coll = 0.0; previous_ionize_box->mean_f_coll_MINI = 0.0; #pragma omp parallel shared(prev_deltax_unfiltered) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = -1.5; } } } } } else{ #pragma omp parallel shared(prev_deltax_unfiltered,previous_perturbed_field) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = previous_perturbed_field->density[HII_R_INDEX(i,j,k)]; } } } } } LOG_SUPER_DEBUG("previous density field calculated"); // fields added for minihalos Mcrit_atom = 
atomic_cooling_threshold(redshift); log10_Mcrit_atom = log10(Mcrit_atom); log10_Mcrit_mol = log10(lyman_werner_threshold(redshift, 0.)); log10_Mturnover_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_MINI_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_MINI_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if (!log10_Mturnover_unfiltered || !log10_Mturnover_filtered || !log10_Mturnover_MINI_unfiltered || !log10_Mturnover_MINI_filtered){// || !Mcrit_RE_grid || !Mcrit_LW_grid) LOG_ERROR("Error allocating memory for Mturnover or Mturnover_MINI boxes"); Throw(MemoryAllocError); } LOG_SUPER_DEBUG("Calculating and outputting Mcrit boxes for atomic and molecular halos..."); #pragma omp parallel shared(redshift,previous_ionize_box,spin_temp,Mcrit_atom,log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered)\ private(x,y,z,Mcrit_RE,Mcrit_LW,Mturnover,Mturnover_MINI,log10_Mturnover,log10_Mturnover_MINI) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:ave_log10_Mturnover,ave_log10_Mturnover_MINI) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ Mcrit_RE = reionization_feedback(redshift, previous_ionize_box->Gamma12_box[HII_R_INDEX(x, y, z)], previous_ionize_box->z_re_box[HII_R_INDEX(x, y, z)]); Mcrit_LW = lyman_werner_threshold(redshift, spin_temp->J_21_LW_box[HII_R_INDEX(x, y, z)]); //*((float *)Mcrit_RE_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_RE; //*((float *)Mcrit_LW_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_LW; Mturnover = Mcrit_RE > Mcrit_atom ? Mcrit_RE : Mcrit_atom; Mturnover_MINI = Mcrit_RE > Mcrit_LW ? 
Mcrit_RE : Mcrit_LW; log10_Mturnover = log10(Mturnover); log10_Mturnover_MINI = log10(Mturnover_MINI); *((float *)log10_Mturnover_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover; *((float *)log10_Mturnover_MINI_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover_MINI; ave_log10_Mturnover += log10_Mturnover; ave_log10_Mturnover_MINI += log10_Mturnover_MINI; } } } } box->log10_Mturnover_ave = ave_log10_Mturnover/(double) HII_TOT_NUM_PIXELS; box->log10_Mturnover_MINI_ave = ave_log10_Mturnover_MINI/(double) HII_TOT_NUM_PIXELS; Mturnover = pow(10., box->log10_Mturnover_ave); Mturnover_MINI = pow(10., box->log10_Mturnover_MINI_ave); M_MIN = global_params.M_MIN_INTEGRAL; Mlim_Fstar_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR, astro_params->F_STAR7_MINI * pow(1e3,astro_params->ALPHA_STAR)); Mlim_Fesc_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_ESC, astro_params->F_ESC7_MINI * pow(1e3, astro_params->ALPHA_ESC)); LOG_SUPER_DEBUG("average turnover masses are %.2f and %.2f for ACGs and MCGs", box->log10_Mturnover_ave, box->log10_Mturnover_MINI_ave); } else{ M_MIN = astro_params->M_TURN/50.; Mturnover = astro_params->M_TURN; box->log10_Mturnover_ave = log10(Mturnover); box->log10_Mturnover_MINI_ave = log10(Mturnover); } Mlim_Fstar = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR, astro_params->F_STAR10); Mlim_Fesc = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_ESC, astro_params->F_ESC10); } else { //set the minimum source mass if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 1.22); } else { // ionized IGM M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 0.6); } } LOG_SUPER_DEBUG("minimum source mass has been set: %f", M_MIN); if(user_params->USE_INTERPOLATION_TABLES) { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20); } else{ if(!flag_options->USE_TS_FLUCT) { 
initialiseSigmaMInterpTable(M_MIN,1e20); } else if(flag_options->USE_MINI_HALOS){ initialiseSigmaMInterpTable(global_params.M_MIN_INTEGRAL/50.,1e20); } } } LOG_SUPER_DEBUG("sigma table has been initialised"); // check for WDM if (global_params.P_CUTOFF && ( M_MIN < M_J_WDM())){ LOG_WARNING("The default Jeans mass of %e Msun is smaller than the scale supressed by the effective pressure of WDM.", M_MIN); M_MIN = M_J_WDM(); LOG_WARNING("Setting a new effective Jeans mass from WDM pressure supression of %e Msun", M_MIN); } // ARE WE USING A DISCRETE HALO FIELD (identified in the ICs with FindHaloes.c and evolved with PerturbHaloField.c) if(flag_options->USE_HALO_FIELD) { M_coll_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); M_coll_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); #pragma omp parallel shared(M_coll_unfiltered) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_FFT_NUM_PIXELS; ct++){ *((float *)M_coll_unfiltered + ct) = 0; } } #pragma omp parallel shared(M_coll_unfiltered,halos) \ private(i_halo,x,y,z) num_threads(user_params->N_THREADS) { #pragma omp for for (i_halo=0; i_halo<halos->n_halos; i_halo++){ x = halos->halo_coords[0+3*i_halo]; y = halos->halo_coords[1+3*i_halo]; z = halos->halo_coords[2+3*i_halo]; #pragma omp atomic *((float *)M_coll_unfiltered + HII_R_FFT_INDEX(x, y, z)) += halos->halo_masses[i_halo]; } } } // end of the USE_HALO_FIELD option // lets check if we are going to bother with computing the inhmogeneous field at all... 
global_xH = 0.0; // Determine the normalisation for the excursion set algorithm if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ if (previous_ionize_box->mean_f_coll * ION_EFF_FACTOR < 1e-4){ box->mean_f_coll = Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } else{ box->mean_f_coll = previous_ionize_box->mean_f_coll + \ Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc) - \ Nion_General(prev_redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI < 1e-4){ box->mean_f_coll_MINI = Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom, astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI); } else{ box->mean_f_coll_MINI = previous_ionize_box->mean_f_coll_MINI + \ Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI ,Mlim_Fstar_MINI,Mlim_Fesc_MINI) - \ Nion_General_MINI(prev_redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI); } f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); f_coll_min_MINI = Nion_General_MINI(global_params.Z_HEAT_MAX,M_MIN,Mturnover_MINI,Mcrit_atom, astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI); } else{ box->mean_f_coll = 
Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); box->mean_f_coll_MINI = 0.; f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } } else { box->mean_f_coll = FgtrM_General(redshift, M_MIN); } if(isfinite(box->mean_f_coll)==0) { LOG_ERROR("Mean collapse fraction is either infinite or NaN!"); Throw(ParameterError); } LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll: %e", box->mean_f_coll); if (flag_options->USE_MINI_HALOS){ if(isfinite(box->mean_f_coll_MINI)==0) { LOG_ERROR("Mean collapse fraction of MINI is either infinite or NaN!"); Throw(ParameterError); } LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll_MINI: %e", box->mean_f_coll_MINI); } if (box->mean_f_coll * ION_EFF_FACTOR + box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI< global_params.HII_ROUND_ERR){ // way too small to ionize anything... // printf( "The mean collapse fraction is %e, which is much smaller than the effective critical collapse fraction of %e\n I will just declare everything to be neutral\n", mean_f_coll, f_coll_crit); // find the neutral fraction if(flag_options->USE_TS_FLUCT) { #pragma omp parallel shared(box,spin_temp) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:global_xH) for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){ box->xH_box[ct] = 1.-spin_temp->x_e_box[ct]; // convert from x_e to xH global_xH += box->xH_box[ct]; box->temp_kinetic_all_gas[ct] = spin_temp->Tk_box[ct]; } } global_xH /= (double)HII_TOT_NUM_PIXELS; } else { global_xH = 1. 
- xion_RECFAST(redshift, 0); #pragma omp parallel shared(box,global_xH,TK) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){ box->xH_box[ct] = global_xH; box->temp_kinetic_all_gas[ct] = TK; } } } } else { // Take the ionisation fraction from the X-ray ionisations from Ts.c (only if the calculate spin temperature flag is set) if (flag_options->USE_TS_FLUCT) { #pragma omp parallel shared(xe_unfiltered, spin_temp) private(i, j, k) num_threads(user_params->N_THREADS) { #pragma omp for for (i = 0; i < user_params->HII_DIM; i++) { for (j = 0; j < user_params->HII_DIM; j++) { for (k = 0; k < user_params->HII_DIM; k++) { *((float *) xe_unfiltered + HII_R_FFT_INDEX(i, j, k)) = spin_temp->x_e_box[HII_R_INDEX(i, j, k)]; } } } } } LOG_SUPER_DEBUG("calculated ionization fraction"); if (flag_options->INHOMO_RECO) { #pragma omp parallel shared(N_rec_unfiltered, previous_ionize_box) private(i, j, k) num_threads(user_params->N_THREADS) { #pragma omp for for (i = 0; i < user_params->HII_DIM; i++) { for (j = 0; j < user_params->HII_DIM; j++) { for (k = 0; k < user_params->HII_DIM; k++) { *((float *) N_rec_unfiltered + HII_R_FFT_INDEX(i, j, k)) = previous_ionize_box->dNrec_box[HII_R_INDEX(i, j, k)]; } } } } } dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, deltax_unfiltered); LOG_SUPER_DEBUG("FFTs performed"); if(flag_options->USE_MINI_HALOS){ dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_unfiltered); dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_unfiltered); dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_unfiltered); LOG_SUPER_DEBUG("MINI HALO ffts performed"); } if (flag_options->USE_HALO_FIELD){ dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_unfiltered); 
LOG_SUPER_DEBUG("HALO_FIELD ffts performed"); } if(flag_options->USE_TS_FLUCT) { dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_unfiltered); LOG_SUPER_DEBUG("Ts ffts performed"); } if (flag_options->INHOMO_RECO) { dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_unfiltered); } // remember to add the factor of VOLUME/TOT_NUM_PIXELS when converting from // real space to k-space // Note: we will leave off factor of VOLUME, in anticipation of the inverse FFT below #pragma omp parallel shared(deltax_unfiltered,xe_unfiltered,N_rec_unfiltered,prev_deltax_unfiltered,\ log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered,M_coll_unfiltered) \ private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_KSPACE_NUM_PIXELS; ct++){ deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); if(flag_options->USE_TS_FLUCT) { xe_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if (flag_options->INHOMO_RECO){ N_rec_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if(flag_options->USE_HALO_FIELD) { M_coll_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if(flag_options->USE_MINI_HALOS){ prev_deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); log10_Mturnover_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); log10_Mturnover_MINI_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); } } } LOG_SUPER_DEBUG("deltax unfiltered calculated"); // ************************************************************************************* // // ***************** LOOP THROUGH THE FILTER RADII (in Mpc) *************************** // // ************************************************************************************* // // set the max radius we will use, making sure we are always sampling the same values of radius // (this avoids aliasing differences w redshift) short_completely_ionised = 0; // loop through the filter radii (in Mpc) erfc_denom_cell = 1; //dummy value R=fmax(global_params.R_BUBBLE_MIN, 
(cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM)); while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) <= FRACT_FLOAT_ERR) { R *= global_params.DELTA_R_HII_FACTOR; if (R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) { stored_R = R / (global_params.DELTA_R_HII_FACTOR); } } LOG_DEBUG("set max radius: %f", R); R=fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN); LAST_FILTER_STEP = 0; first_step_R = 1; double R_temp = (double) (astro_params->R_BUBBLE_MAX); counter = 0; while (!LAST_FILTER_STEP && (M_MIN < RtoM(R)) ){ LOG_ULTRA_DEBUG("while loop for until RtoM(R)=%f reaches M_MIN=%f", RtoM(R), M_MIN); // Check if we are the last filter step if ( ((R/(global_params.DELTA_R_HII_FACTOR) - cell_length_factor*(user_params->BOX_LEN)/(float)(user_params->HII_DIM)) <= FRACT_FLOAT_ERR) || \ ((R/(global_params.DELTA_R_HII_FACTOR) - global_params.R_BUBBLE_MIN) <= FRACT_FLOAT_ERR) ) { LAST_FILTER_STEP = 1; R = fmax(cell_length_factor*user_params->BOX_LEN/(double)(user_params->HII_DIM), global_params.R_BUBBLE_MIN); } // Copy all relevant quantities from memory into new arrays to be smoothed and FFT'd. 
if (flag_options->USE_TS_FLUCT) { memcpy(xe_filtered, xe_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } if (flag_options->INHOMO_RECO) { memcpy(N_rec_filtered, N_rec_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } if (flag_options->USE_HALO_FIELD) { memcpy(M_coll_filtered, M_coll_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } memcpy(deltax_filtered, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if(flag_options->USE_MINI_HALOS){ memcpy(prev_deltax_filtered, prev_deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); memcpy(log10_Mturnover_MINI_filtered, log10_Mturnover_MINI_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); memcpy(log10_Mturnover_filtered, log10_Mturnover_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if (!LAST_FILTER_STEP || ((R - cell_length_factor * (user_params->BOX_LEN / (double) (user_params->HII_DIM))) > FRACT_FLOAT_ERR)) { if (flag_options->USE_TS_FLUCT) { filter_box(xe_filtered, 1, global_params.HII_FILTER, R); } if (flag_options->INHOMO_RECO) { filter_box(N_rec_filtered, 1, global_params.HII_FILTER, R); } if (flag_options->USE_HALO_FIELD) { filter_box(M_coll_filtered, 1, global_params.HII_FILTER, R); } filter_box(deltax_filtered, 1, global_params.HII_FILTER, R); if(flag_options->USE_MINI_HALOS){ filter_box(prev_deltax_filtered, 1, global_params.HII_FILTER, R); filter_box(log10_Mturnover_MINI_filtered, 1, global_params.HII_FILTER, R); filter_box(log10_Mturnover_filtered, 1, global_params.HII_FILTER, R); } } // Perform FFTs dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, deltax_filtered); if(flag_options->USE_MINI_HALOS){ dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_filtered); dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_filtered); dft_c2r_cube(user_params->USE_FFTW_WISDOM, 
user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_filtered); } if (flag_options->USE_HALO_FIELD) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_filtered); } if (flag_options->USE_TS_FLUCT) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_filtered); } if (flag_options->INHOMO_RECO) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_filtered); } // Check if this is the last filtering scale. If so, we don't need deltax_unfiltered anymore. // We will re-read it to get the real-space field, which we will use to set the residual neutral fraction ST_over_PS = 0; ST_over_PS_MINI = 0; f_coll = 0; f_coll_MINI = 0; massofscaleR = RtoM(R); if(!user_params->USE_INTERPOLATION_TABLES) { sigmaMmax = sigma_z0(massofscaleR); } if (!flag_options->USE_HALO_FIELD) { if (flag_options->USE_MASS_DEPENDENT_ZETA) { min_density = max_density = 0.0; #pragma omp parallel shared(deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:max_density) reduction(min:min_density) for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // delta cannot be less than -1 *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX( *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. 
+ FRACT_FLOAT_ERR); if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) < min_density) { min_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); } if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) > max_density) { max_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); } } } } } if(user_params->USE_INTERPOLATION_TABLES) { InterpolationRange(1,R,user_params->BOX_LEN,&min_density, &max_density); } if (flag_options->USE_MINI_HALOS){ // do the same for prev prev_min_density = prev_max_density = 0.0; #pragma omp parallel shared(prev_deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:prev_max_density) reduction(min:prev_min_density) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ // delta cannot be less than -1 *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) = \ FMAX(*((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) , -1.+FRACT_FLOAT_ERR); if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) < prev_min_density ) { prev_min_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); } if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) > prev_max_density ) { prev_max_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); } } } } } if(user_params->USE_INTERPOLATION_TABLES) { InterpolationRange(2,R,user_params->BOX_LEN,&prev_min_density, &prev_max_density); } // do the same for logM log10Mturn_min = 999; log10Mturn_max = 0.0; log10Mturn_min_MINI = 999; log10Mturn_max_MINI = 0.0; #pragma omp parallel shared(log10_Mturnover_filtered,log10_Mturnover_MINI_filtered,log10_Mcrit_atom,log10_Mcrit_mol) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:log10Mturn_max,log10Mturn_max_MINI) reduction(min:log10Mturn_min,log10Mturn_min_MINI) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for 
(z=0; z<user_params->HII_DIM; z++){ if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_atom) *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_atom; if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX) *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX; // Mturnover cannot be less than Mcrit_mol if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_mol) *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_mol; if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX) *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX; if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min) log10Mturn_min = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max) log10Mturn_max = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min_MINI) log10Mturn_min_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max_MINI) log10Mturn_max_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); } } } } if(user_params->USE_INTERPOLATION_TABLES) { log10Mturn_min = log10Mturn_min *0.99; log10Mturn_max = log10Mturn_max *1.01; log10Mturn_min_MINI = log10Mturn_min_MINI *0.99; log10Mturn_max_MINI = log10Mturn_max_MINI *1.01; log10Mturn_bin_width = (log10Mturn_max - log10Mturn_min) / NMTURN; log10Mturn_bin_width_inv = 1./log10Mturn_bin_width; log10Mturn_bin_width_MINI = (log10Mturn_max_MINI - log10Mturn_min_MINI) / NMTURN; log10Mturn_bin_width_inv_MINI = 1./log10Mturn_bin_width_MINI; } } initialiseGL_Nion(NGL_SFR, 
M_MIN,massofscaleR); if(user_params->USE_INTERPOLATION_TABLES) { if(flag_options->USE_MINI_HALOS){ initialise_Nion_General_spline_MINI(redshift,Mcrit_atom,min_density,max_density,massofscaleR,M_MIN, log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI,log10Mturn_max_MINI, astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){ initialise_Nion_General_spline_MINI_prev(prev_redshift,Mcrit_atom,prev_min_density,prev_max_density, massofscaleR,M_MIN,log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI, log10Mturn_max_MINI,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } } else{ initialise_Nion_General_spline(redshift,min_density,max_density,massofscaleR,astro_params->M_TURN, astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); } } } else { erfc_denom = 2. * (pow(sigma_z0(M_MIN), 2) - pow(sigma_z0(massofscaleR), 2)); if (erfc_denom < 0) { // our filtering scale has become too small break; } erfc_denom = sqrt(erfc_denom); erfc_denom = 1. 
/ (growth_factor * erfc_denom); } } // Determine the global averaged f_coll for the overall normalisation // Reset value of int check to see if we are over-stepping our interpolation table for (i = 0; i < user_params->N_THREADS; i++) { overdense_int_boundexceeded_threaded[i] = 0; } // renormalize the collapse fraction so that the mean matches ST, // since we are using the evolved (non-linear) density field #pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,overdense_int_boundexceeded_threaded,log10_Nion_spline,Nion_spline,erfc_denom,erfc_arg_min,\ erfc_arg_max,InvArgBinWidth,ArgBinWidth,ERFC_VALS_DIFF,ERFC_VALS,log10_Mturnover_filtered,log10Mturn_min,log10Mturn_bin_width_inv, \ log10_Mturnover_MINI_filtered,log10Mturn_bin_width_inv_MINI,log10_Nion_spline_MINI,prev_deltax_filtered,previous_ionize_box,ION_EFF_FACTOR,\ prev_overdense_small_min,prev_overdense_small_bin_width_inv,prev_log10_Nion_spline,prev_log10_Nion_spline_MINI,prev_overdense_large_min,\ prev_overdense_large_bin_width_inv,prev_Nion_spline,prev_Nion_spline_MINI,box,counter,M_coll_filtered,massofscaleR,pixel_volume,sigmaMmax,\ M_MIN,growth_factor,Mlim_Fstar,Mlim_Fesc,Mcrit_atom,Mlim_Fstar_MINI,Mlim_Fesc_MINI,prev_growth_factor) \ private(x,y,z,curr_dens,Splined_Fcoll,Splined_Fcoll_MINI,dens_val,overdense_int,erfc_arg_val,erfc_arg_val_index,log10_Mturnover,\ log10_Mturnover_int,log10_Mturnover_MINI,log10_Mturnover_MINI_int,prev_dens,prev_Splined_Fcoll,prev_Splined_Fcoll_MINI,\ prev_dens_val,density_over_mean,status_int) \ num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:f_coll,f_coll_MINI) for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // delta cannot be less than -1 *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX( *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. 
+ FRACT_FLOAT_ERR); // <N_rec> cannot be less than zero if (flag_options->INHOMO_RECO) { *((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)), 0.0); } // x_e has to be between zero and unity if (flag_options->USE_TS_FLUCT) { *((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = FMAX(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.); *((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = FMIN(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.999); } if(flag_options->USE_HALO_FIELD) { // collapsed mass cannot be less than zero *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) = FMAX( *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) , 0.0); density_over_mean = 1.0 + *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z)); Splined_Fcoll = *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) / (massofscaleR*density_over_mean); Splined_Fcoll *= (4/3.0)*PI*pow(R,3) / pixel_volume; } else { curr_dens = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ log10_Mturnover = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); log10_Mturnover_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,log10_Mturnover,log10_Mturnover_MINI, &Splined_Fcoll,&Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; } } else { Splined_Fcoll = Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, pow(10.,log10_Mturnover),astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); Splined_Fcoll_MINI = Nion_ConditionalM_MINI(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, 
pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } prev_dens = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){ if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,2,prev_dens,log10_Mturnover,log10_Mturnover_MINI, &prev_Splined_Fcoll,&prev_Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; } } else { prev_Splined_Fcoll = Nion_ConditionalM(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens, pow(10.,log10_Mturnover),astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); prev_Splined_Fcoll_MINI = Nion_ConditionalM_MINI(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens, pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } } else{ prev_Splined_Fcoll = 0.; prev_Splined_Fcoll_MINI = 0.; } } else{ if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,0.,0.,&Splined_Fcoll,&Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; } } else { Splined_Fcoll = Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, astro_params->M_TURN,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); } } } else { erfc_arg_val = (Deltac - 
curr_dens) * erfc_denom; if (erfc_arg_val < erfc_arg_min || erfc_arg_val > erfc_arg_max) { Splined_Fcoll = splined_erfc(erfc_arg_val); } else { erfc_arg_val_index = (int) floor((erfc_arg_val - erfc_arg_min) * InvArgBinWidth); Splined_Fcoll = ERFC_VALS[erfc_arg_val_index] + \ (erfc_arg_val - (erfc_arg_min + ArgBinWidth * (double) erfc_arg_val_index)) * ERFC_VALS_DIFF[erfc_arg_val_index] *InvArgBinWidth; } } } // save the value of the collasped fraction into the Fcoll array if (flag_options->USE_MINI_HALOS){ if (Splined_Fcoll > 1.) Splined_Fcoll = 1.; if (Splined_Fcoll < 0.) Splined_Fcoll = 1e-40; if (prev_Splined_Fcoll > 1.) prev_Splined_Fcoll = 1.; if (prev_Splined_Fcoll < 0.) prev_Splined_Fcoll = 1e-40; box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \ previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll - prev_Splined_Fcoll; if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.; //if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40; //if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]) // box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll += box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; if(isfinite(f_coll)==0) { LOG_ERROR("f_coll is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g,%g,%g",\ x,y,z,curr_dens,prev_dens,previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ Splined_Fcoll, prev_Splined_Fcoll, curr_dens, prev_dens, \ log10_Mturnover, *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z))); Throw(ParameterError); } if (Splined_Fcoll_MINI > 1.) 
Splined_Fcoll_MINI = 1.; if (Splined_Fcoll_MINI < 0.) Splined_Fcoll_MINI = 1e-40; if (prev_Splined_Fcoll_MINI > 1.) prev_Splined_Fcoll_MINI = 1.; if (prev_Splined_Fcoll_MINI < 0.) prev_Splined_Fcoll_MINI = 1e-40; box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \ previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll_MINI - prev_Splined_Fcoll_MINI; if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.; //if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40; //if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]) // box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll_MINI += box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; if(isfinite(f_coll_MINI)==0) { LOG_ERROR("f_coll_MINI is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g",\ x,y,z,curr_dens, prev_dens, previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ Splined_Fcoll_MINI, prev_Splined_Fcoll_MINI, log10_Mturnover_MINI,\ *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z))); LOG_DEBUG("%g,%g",previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]); LOG_DEBUG("%g,%g,%g,%g,%g,%g,%g,%g,",log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, \ log10Mturn_bin_width_inv, log10Mturn_max_MINI, log10Mturn_min_MINI, \ log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI); LOG_DEBUG("%g,%g,%g,%g,%d",curr_dens, overdense_small_min, overdense_small_bin_width_inv, dens_val, 
overdense_int); LOG_DEBUG("%d,%g,%g,%g",log10_Mturnover_MINI_int, log10_Mturnover_MINI, log10Mturn_min_MINI, log10Mturn_bin_width_inv_MINI); LOG_DEBUG("%g", *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z))); LOG_DEBUG("%d", counter); LOG_DEBUG("%g,%g,%g,%g",log10_Nion_spline_MINI[overdense_int + NSFR_low* log10_Mturnover_MINI_int ], \ log10_Nion_spline_MINI[overdense_int +1+ NSFR_low* log10_Mturnover_MINI_int ], \ log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)], \ log10_Nion_spline_MINI[overdense_int +1+ NSFR_low*(log10_Mturnover_MINI_int+1)]); Throw(ParameterError); } } else{ box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = Splined_Fcoll; f_coll += Splined_Fcoll; } } } } } // end loop through Fcoll box for (i = 0; i < user_params->N_THREADS; i++) { if (overdense_int_boundexceeded_threaded[i] == 1) { LOG_ERROR("I have overstepped my allocated memory for one of the interpolation tables for the nion_splines"); Throw(ParameterError); } } if(isfinite(f_coll)==0) { LOG_ERROR("f_coll is either infinite or NaN!"); Throw(ParameterError); } f_coll /= (double) HII_TOT_NUM_PIXELS; if(isfinite(f_coll_MINI)==0) { LOG_ERROR("f_coll_MINI is either infinite or NaN!"); Throw(ParameterError); } f_coll_MINI /= (double) HII_TOT_NUM_PIXELS; // To avoid ST_over_PS becoming nan when f_coll = 0, I set f_coll = FRACT_FLOAT_ERR. 
if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (f_coll <= f_coll_min) f_coll = f_coll_min; if (flag_options->USE_MINI_HALOS){ if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI; } } else { if (f_coll <= FRACT_FLOAT_ERR) f_coll = FRACT_FLOAT_ERR; } ST_over_PS = box->mean_f_coll/f_coll; ST_over_PS_MINI = box->mean_f_coll_MINI/f_coll_MINI; ////////////////////////////// MAIN LOOP THROUGH THE BOX /////////////////////////////////// // now lets scroll through the filtered box Gamma_R_prefactor = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR / 1.0e-12; Gamma_R_prefactor_MINI = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR_MINI / 1.0e-12; if(flag_options->PHOTON_CONS) { // Used for recombinations, which means we want to use the original redshift not the adjusted redshift Gamma_R_prefactor *= pow(1+stored_redshift, 2); Gamma_R_prefactor_MINI *= pow(1+stored_redshift, 2); } else { Gamma_R_prefactor *= pow(1+redshift, 2); Gamma_R_prefactor_MINI *= pow(1+redshift, 2); } Gamma_R_prefactor /= t_ast; Gamma_R_prefactor_MINI /= t_ast; if (global_params.FIND_BUBBLE_ALGORITHM != 2 && global_params.FIND_BUBBLE_ALGORITHM != 1) { // center method LOG_ERROR("Incorrect choice of find bubble algorithm: %i", global_params.FIND_BUBBLE_ALGORITHM); Throw(ValueError); } #pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,box,ST_over_PS,pixel_mass,M_MIN,r,f_coll_min,Gamma_R_prefactor,\ ION_EFF_FACTOR,ION_EFF_FACTOR_MINI,LAST_FILTER_STEP,counter,ST_over_PS_MINI,f_coll_min_MINI,Gamma_R_prefactor_MINI,TK) \ private(x,y,z,curr_dens,Splined_Fcoll,f_coll,ave_M_coll_cell,ave_N_min_cell,N_halos_in_cell,rec,xHII_from_xrays,res_xH,\ Splined_Fcoll_MINI,f_coll_MINI) \ num_threads(user_params->N_THREADS) { #pragma omp for for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) 
{ curr_dens = *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z)); Splined_Fcoll = box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll = ST_over_PS * Splined_Fcoll; if (flag_options->USE_MINI_HALOS){ Splined_Fcoll_MINI = box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll_MINI = ST_over_PS_MINI * Splined_Fcoll_MINI; } else{ f_coll_MINI = 0.; } if (LAST_FILTER_STEP){ ave_M_coll_cell = (f_coll + f_coll_MINI) * pixel_mass * (1. + curr_dens); ave_N_min_cell = ave_M_coll_cell / M_MIN; // ave # of M_MIN halos in cell if(user_params->NO_RNG) { N_halos_in_cell = 1.; } else { N_halos_in_cell = (int) gsl_ran_poisson(r[omp_get_thread_num()], global_params.N_POISSON); } } if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (f_coll <= f_coll_min) f_coll = f_coll_min; if (flag_options->USE_MINI_HALOS){ if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI; } } if (flag_options->INHOMO_RECO) { rec = (*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z))); // number of recombinations per mean baryon rec /= (1. + curr_dens); // number of recombinations per baryon inside <R> } else { rec = 0.; } // adjust the denominator of the collapse fraction for the residual electron fraction in the neutral medium if (flag_options->USE_TS_FLUCT){ xHII_from_xrays = *((float *)xe_filtered + HII_R_FFT_INDEX(x,y,z)); } else { xHII_from_xrays = 0.; } // check if fully ionized! if ( (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI> (1. - xHII_from_xrays)*(1.0+rec)) ){ //IONIZED!! // if this is the first crossing of the ionization barrier for this cell (largest R), record the gamma // this assumes photon-starved growth of HII regions... 
breaks down post EoR if (flag_options->INHOMO_RECO && (box->xH_box[HII_R_INDEX(x,y,z)] > FRACT_FLOAT_ERR) ){ box->Gamma12_box[HII_R_INDEX(x,y,z)] = Gamma_R_prefactor * f_coll + Gamma_R_prefactor_MINI * f_coll_MINI; box->MFP_box[HII_R_INDEX(x,y,z)] = R; } // keep track of the first time this cell is ionized (earliest time) if (previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)] < 0){ box->z_re_box[HII_R_INDEX(x,y,z)] = redshift; } else{ box->z_re_box[HII_R_INDEX(x,y,z)] = previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)]; } // FLAG CELL(S) AS IONIZED if (global_params.FIND_BUBBLE_ALGORITHM == 2) // center method box->xH_box[HII_R_INDEX(x,y,z)] = 0; if (global_params.FIND_BUBBLE_ALGORITHM == 1) // sphere method update_in_sphere(box->xH_box, user_params->HII_DIM, R/(user_params->BOX_LEN), \ x/(user_params->HII_DIM+0.0), y/(user_params->HII_DIM+0.0), z/(user_params->HII_DIM+0.0)); } // end ionized // If not fully ionized, then assign partial ionizations else if (LAST_FILTER_STEP && (box->xH_box[HII_R_INDEX(x, y, z)] > TINY)) { if (f_coll>1) f_coll=1; if (f_coll_MINI>1) f_coll_MINI=1; if (!flag_options->USE_HALO_FIELD){ if(ave_N_min_cell < global_params.N_POISSON) { f_coll = N_halos_in_cell * ( ave_M_coll_cell / (float)global_params.N_POISSON ) / (pixel_mass*(1. + curr_dens)); if (flag_options->USE_MINI_HALOS){ f_coll_MINI = f_coll * (f_coll_MINI * ION_EFF_FACTOR_MINI) / (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI); f_coll = f_coll - f_coll_MINI; } else{ f_coll_MINI = 0.; } } if (ave_M_coll_cell < (M_MIN / 5.)) { f_coll = 0.; f_coll_MINI = 0.; } } if (f_coll>1) f_coll=1; if (f_coll_MINI>1) f_coll_MINI=1; res_xH = 1. - f_coll * ION_EFF_FACTOR - f_coll_MINI * ION_EFF_FACTOR_MINI; // put the partial ionization here because we need to exclude xHII_from_xrays... 
if (flag_options->USE_TS_FLUCT){ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(spin_temp->Tk_box[HII_R_INDEX(x,y,z)], res_xH); } else{ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(TK, res_xH); } res_xH -= xHII_from_xrays; // and make sure fraction doesn't blow up for underdense pixels if (res_xH < 0) res_xH = 0; else if (res_xH > 1) res_xH = 1; box->xH_box[HII_R_INDEX(x, y, z)] = res_xH; } // end partial ionizations at last filtering step } // k } // j } // i } if (first_step_R) { R = stored_R; first_step_R = 0; } else { R /= (global_params.DELTA_R_HII_FACTOR); } if (flag_options->USE_MINI_HALOS) counter += 1; } #pragma omp parallel shared(box,spin_temp,redshift,deltax_unfiltered_original,TK) private(x,y,z) num_threads(user_params->N_THREADS) { #pragma omp for for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ if ((box->z_re_box[HII_R_INDEX(x,y,z)]>0) && (box->xH_box[HII_R_INDEX(x,y,z)] < TINY)){ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputeFullyIoinizedTemperature(box->z_re_box[HII_R_INDEX(x,y,z)], \ redshift, *((float *)deltax_unfiltered_original + HII_R_FFT_INDEX(x,y,z))); // Below sometimes (very rare though) can happen when the density drops too fast and to below T_HI if (flag_options->USE_TS_FLUCT){ if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < spin_temp->Tk_box[HII_R_INDEX(x,y,z)]) box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = spin_temp->Tk_box[HII_R_INDEX(x,y,z)]; } else{ if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < TK) box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = TK; } } } } } } for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ if(isfinite(box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)])==0){ LOG_ERROR("Tk after fully ioinzation is either infinite or a Nan. 
Something has gone wrong "\ "in the temperature calculation: z_re=%.4f, redshift=%.4f, curr_dens=%.4e", box->z_re_box[HII_R_INDEX(x,y,z)], redshift, curr_dens); Throw(ParameterError); } } } } // find the neutral fraction if (LOG_LEVEL >= DEBUG_LEVEL) { global_xH = 0; #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:global_xH) for (ct = 0; ct < HII_TOT_NUM_PIXELS; ct++) { global_xH += box->xH_box[ct]; } } global_xH /= (float) HII_TOT_NUM_PIXELS; } if (isfinite(global_xH) == 0) { LOG_ERROR( "Neutral fraction is either infinite or a Nan. Something has gone wrong in the ionisation calculation!"); Throw(ParameterError); } // update the N_rec field if (flag_options->INHOMO_RECO) { #pragma omp parallel shared(perturbed_field, adjustment_factor, stored_redshift, redshift, box, previous_ionize_box, \ fabs_dtdz, ZSTEP, something_finite_or_infinite) \ private(x, y, z, curr_dens, z_eff, dNrec) num_threads(user_params->N_THREADS) { #pragma omp for for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // use the original density and redshift for the snapshot (not the adjusted redshift) // Only want to use the adjusted redshift for the ionisation field curr_dens = 1.0 + (perturbed_field->density[HII_R_INDEX(x, y, z)]) / adjustment_factor; z_eff = pow(curr_dens, 1.0 / 3.0); if (flag_options->PHOTON_CONS) { z_eff *= (1 + stored_redshift); } else { z_eff *= (1 + redshift); } dNrec = splined_recombination_rate(z_eff - 1., box->Gamma12_box[HII_R_INDEX(x, y, z)]) * fabs_dtdz * ZSTEP * (1. 
- box->xH_box[HII_R_INDEX(x, y, z)]); if (isfinite(dNrec) == 0) { something_finite_or_infinite = 1; } box->dNrec_box[HII_R_INDEX(x, y, z)] = previous_ionize_box->dNrec_box[HII_R_INDEX(x, y, z)] + dNrec; } } } } if (something_finite_or_infinite) { LOG_ERROR("Recombinations have returned either an infinite or NaN value."); Throw(ParameterError); } } fftwf_cleanup_threads(); fftwf_cleanup(); fftwf_forget_wisdom(); } destruct_heat(); for (i=0; i<user_params->N_THREADS; i++) { gsl_rng_free (r[i]); } LOG_DEBUG("global_xH = %e",global_xH); fftwf_free(deltax_unfiltered); fftwf_free(deltax_unfiltered_original); fftwf_free(deltax_filtered); if(flag_options->USE_MINI_HALOS){ fftwf_free(prev_deltax_unfiltered); fftwf_free(prev_deltax_filtered); } if(flag_options->USE_TS_FLUCT) { fftwf_free(xe_unfiltered); fftwf_free(xe_filtered); } if (flag_options->INHOMO_RECO){ fftwf_free(N_rec_unfiltered); fftwf_free(N_rec_filtered); } if(flag_options->USE_HALO_FIELD) { fftwf_free(M_coll_unfiltered); fftwf_free(M_coll_filtered); } LOG_SUPER_DEBUG("freed fftw boxes"); if(flag_options->USE_MASS_DEPENDENT_ZETA) { free(xi_SFR); free(wi_SFR); if(user_params->USE_INTERPOLATION_TABLES) { free(log10_overdense_spline_SFR); free(Overdense_spline_SFR); free(log10_Nion_spline); free(Nion_spline); } if(flag_options->USE_MINI_HALOS){ free(Mturns); free(Mturns_MINI); fftwf_free(log10_Mturnover_unfiltered); fftwf_free(log10_Mturnover_filtered); fftwf_free(log10_Mturnover_MINI_unfiltered); fftwf_free(log10_Mturnover_MINI_filtered); if(user_params->USE_INTERPOLATION_TABLES) { free(prev_log10_overdense_spline_SFR); free(prev_Overdense_spline_SFR); free(prev_log10_Nion_spline); free(prev_Nion_spline); free(log10_Nion_spline_MINI); free(Nion_spline_MINI); free(prev_log10_Nion_spline_MINI); free(prev_Nion_spline_MINI); } } //fftwf_free(Mcrit_RE_grid); //fftwf_free(Mcrit_LW_grid); } if (prev_redshift < 1){ free(previous_ionize_box->z_re_box); if (flag_options->USE_MASS_DEPENDENT_ZETA && 
flag_options->USE_MINI_HALOS){ free(previous_ionize_box->Gamma12_box); free(previous_ionize_box->dNrec_box); free(previous_ionize_box->Fcoll); free(previous_ionize_box->Fcoll_MINI); } } if(!flag_options->USE_TS_FLUCT && user_params->USE_INTERPOLATION_TABLES) { freeSigmaMInterpTable(); } free(overdense_int_boundexceeded_threaded); LOG_DEBUG("finished!\n"); } // End of Try() Catch(status){ return(status); } return(0); } int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI) { int overdense_int,overdense_int_status; float dens_val, small_bin_width, small_bin_width_inv, small_min; float log10_Mturnover, log10_Mturnover_MINI; int log10_Mturnover_int, log10_Mturnover_MINI_int; overdense_int_status = 0; if(MINI_HALOS) { log10_Mturnover = (filtered_Mturn - log10Mturn_min ) * log10Mturn_bin_width_inv; log10_Mturnover_int = (int)floorf( log10_Mturnover ); log10_Mturnover_MINI = (filtered_Mturn_MINI - log10Mturn_min_MINI ) * log10Mturn_bin_width_inv_MINI; log10_Mturnover_MINI_int = (int)floorf( log10_Mturnover_MINI ); } if(dens_type==1) { small_min = overdense_small_min; small_bin_width = overdense_small_bin_width; small_bin_width_inv = overdense_small_bin_width_inv; } if(dens_type==2) { small_min = prev_overdense_small_min; small_bin_width = prev_overdense_small_bin_width; small_bin_width_inv = prev_overdense_small_bin_width_inv; } if (curr_dens < global_params.CRIT_DENS_TRANSITION) { if (curr_dens <= -1.) { *Splined_Fcoll = 0; if(MINI_HALOS) { *Splined_Fcoll_MINI = 0; } } else { dens_val = (log10f(curr_dens + 1.) - small_min) * small_bin_width_inv; overdense_int = (int) floorf(dens_val); if (overdense_int < 0 || (overdense_int + 1) > (NSFR_low - 1)) { overdense_int_status = 1; LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). 
Current density=%g", omp_get_thread_num(), overdense_int, dens_val); } if(MINI_HALOS) { if(dens_type==1) { *Splined_Fcoll = ( \ log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \ log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \ ( \ log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover - (float)log10_Mturnover_int); *Splined_Fcoll_MINI = ( \ log10_Nion_spline_MINI[overdense_int + NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \ log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \ ( \ log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int); } if(dens_type==2) { *Splined_Fcoll = ( \ prev_log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \ prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \ ( \ prev_log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover - (float)log10_Mturnover_int); *Splined_Fcoll_MINI = ( \ prev_log10_Nion_spline_MINI[overdense_int + 
NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \ prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \ ( \ prev_log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int); } *Splined_Fcoll_MINI = expf(*Splined_Fcoll_MINI); } else { *Splined_Fcoll = log10_Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + log10_Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int); } *Splined_Fcoll = expf(*Splined_Fcoll); } } else { if (curr_dens < 0.99 * Deltac) { if(dens_type==1) { dens_val = (curr_dens - overdense_large_min) * overdense_large_bin_width_inv; } if(dens_type==2) { dens_val = (curr_dens - prev_overdense_large_min) * prev_overdense_large_bin_width_inv; } overdense_int = (int) floorf(dens_val); if (overdense_int < 0 || (overdense_int + 1) > (NSFR_high - 1)) { overdense_int_status = 1; LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). 
Current density=%g", omp_get_thread_num(), overdense_int, dens_val); } if(MINI_HALOS) { if(dens_type==1) { *Splined_Fcoll = ( \ Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \ Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \ ( \ Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover - (float)log10_Mturnover_int); *Splined_Fcoll_MINI = ( \ Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \ Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \ ( \ Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int); } if(dens_type==2) { *Splined_Fcoll = ( \ prev_Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \ prev_Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \ ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \ ( \ prev_Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \ prev_Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \ ) * (log10_Mturnover - (float)log10_Mturnover_int); *Splined_Fcoll_MINI = ( \ prev_Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + 
(float)overdense_int - dens_val ) +
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int )
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) +
                    (
                        prev_Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) +
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int )
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
            }
            else {
                // No mini-halo contribution: plain linear interpolation of the
                // high-density Nion table in the overdensity direction only.
                *Splined_Fcoll = Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int);
            }
        }
        else {
            // curr_dens >= 0.99*Deltac: the cell is treated as fully collapsed,
            // so the collapsed fraction saturates at unity.
            *Splined_Fcoll = 1.;
            if(MINI_HALOS) {
                *Splined_Fcoll_MINI = 1.;
            }
        }
    }

    // 0 on success; nonzero if any density landed outside the table bounds
    // (overdense_int_status was set in the lookup branches above).
    return overdense_int_status;
}

/*
 * InterpolationRange:
 *   Set up the binning (minimum and inverse bin width) of the low-density
 *   ("small") collapsed-fraction interpolation table, which is tabulated
 *   uniformly in log10(1 + delta) with NSFR_low points.
 *
 *   dens_type     1 -> write the current-snapshot globals
 *                      (overdense_small_min / _bin_width / _bin_width_inv);
 *                 2 -> write the previous-snapshot globals
 *                      (prev_overdense_small_*).
 *   R             current filter radius (used for the sharp k-filter check).
 *   L             box side length (same units as R).
 *   min_density,
 *   max_density   in/out: extrema of the filtered density field; padded
 *                 slightly outwards here so table lookups never land exactly
 *                 on the edges.
 */
void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density) {

    float small_bin_width, small_bin_width_inv, small_min;

    // Pad the minimum outwards by 0.1% (direction of the padding depends on sign).
    if (*min_density < 0.) {
        *min_density = *min_density * 1.001;
        if (*min_density <= -1.) {
            // log10(1 + delta) diverges at delta = -1, so clamp just above it.
            // Use MIN_DENSITY_LOW_LIMIT as it is smaller than FRACT_FLOAT_ERR.
            *min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
        }
    }
    else {
        *min_density = *min_density * 0.999;
    }

    // Pad the maximum outwards by 0.1% as well.
    if (*max_density < 0.) {
        *max_density = *max_density * 0.999;
    }
    else {
        *max_density = *max_density * 1.001;
    }

    if (global_params.HII_FILTER == 1) {
        // NOTE(review): 0.413566994 appears to be the sharp k-space filter scale
        // factor relating R to the cutoff wavenumber -- confirm against the
        // filtering code before relying on this value.
        if ((0.413566994 * R * 2. * PI / L) > 1.) {
            // The sharp k-space filter will set every cell to zero, and the
            // interpolation table using a flexible min/max density will fail.
            // Fall back to the full fixed density range instead.
            *min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
            *max_density = global_params.CRIT_DENS_TRANSITION * 1.001;
        }
    }

    // The table spans NSFR_low - 1 uniform bins in log10(1 + delta), from the
    // (padded) minimum up to at most CRIT_DENS_TRANSITION (the hand-off point
    // to the high-density table).
    small_min = log10(1. + *min_density);

    if (*max_density > global_params.CRIT_DENS_TRANSITION * 1.001) {
        small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + global_params.CRIT_DENS_TRANSITION * 1.001) - small_min);
    }
    else {
        small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + *max_density) - small_min);
    }
    small_bin_width_inv = 1./small_bin_width;

    // Publish the binning through the module-level globals consumed by the
    // table-lookup code: dens_type==1 -> current snapshot, 2 -> previous.
    if(dens_type==1) {
        overdense_small_min = small_min;
        overdense_small_bin_width = small_bin_width;
        overdense_small_bin_width_inv = small_bin_width_inv;

        LOG_ULTRA_DEBUG("R=%f, min_density=%f, max_density=%f, overdense_small_min=%f, overdense_small_bin_width=%f",
                        R, *min_density, *max_density, small_min, small_bin_width);
    }
    if(dens_type==2) {
        prev_overdense_small_min = small_min;
        prev_overdense_small_bin_width = small_bin_width;
        prev_overdense_small_bin_width_inv = small_bin_width_inv;

        LOG_ULTRA_DEBUG("R=%f, prev_min_density=%f, prev_max_density=%f, prev_overdense_small_min=%f, prev_overdense_small_bin_width=%f",
                        R, *min_density, *max_density, small_min, small_bin_width);
    }
}